2024-12-02 21:26:50,650 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-02 21:26:50,661 main DEBUG Took 0.009448 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-02 21:26:50,662 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-02 21:26:50,662 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-02 21:26:50,663 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-02 21:26:50,664 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,671 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-02 21:26:50,682 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,683 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,684 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,684 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,684 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,685 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,685 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,686 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,686 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,686 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,687 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,687 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,688 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,688 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
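The LoggerConfig builders above are produced from the log4j2.properties packaged in the hbase-logging test jar; each "Building Plugin[name=logger, ...]" line turns one per-package entry of that file into a logger with the level shown (for example org.apache.zookeeper at ERROR and org.apache.hadoop.hbase.logging.TestJul2Slf4j at DEBUG). As a rough, assumed illustration rather than anything this test actually runs, the resulting configuration can be inspected at runtime through the Log4j Core API:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.LoggerConfig;

// Illustrative helper (not part of the test): looks up the LoggerConfig that the
// "Building Plugin[name=logger, ...]" lines above produce and prints its level.
public final class DumpLoggerConfig {
  public static void main(String[] args) {
    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    Configuration cfg = ctx.getConfiguration();
    LoggerConfig zk = cfg.getLoggerConfig("org.apache.zookeeper");
    System.out.println(zk.getName() + " -> " + zk.getLevel()); // ERROR per the builder above
  }
}
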
2024-12-02 21:26:50,689 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,689 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,690 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,690 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,691 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,691 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,692 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,692 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,692 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,693 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:26:50,693 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,694 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-02 21:26:50,695 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:26:50,697 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-02 21:26:50,699 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-02 21:26:50,700 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
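The createLoggers(...) call above assembles all of those per-package loggers plus the root logger, which is wired to level INFO and the Console appender ("levelAndRefs=INFO,Console"). A minimal sketch of applying the same levels programmatically, purely for orientation (the test itself relies only on log4j2.properties):

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Sketch only: mirrors a few of the levels reported by the LoggerConfig builders above.
public final class TestLogLevelsSketch {
  public static void apply() {
    Configurator.setRootLevel(Level.INFO);
    Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
    Configurator.setLevel("org.apache.hadoop", Level.WARN);
    Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
    Configurator.setLevel("org.apache.hadoop.hbase.logging.TestJul2Slf4j", Level.DEBUG);
  }
}
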
2024-12-02 21:26:50,701 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-02 21:26:50,702 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-02 21:26:50,713 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-02 21:26:50,716 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-02 21:26:50,718 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-02 21:26:50,719 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-02 21:26:50,719 main DEBUG createAppenders(={Console}) 2024-12-02 21:26:50,720 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-02 21:26:50,721 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-02 21:26:50,721 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-02 21:26:50,722 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-02 21:26:50,722 main DEBUG OutputStream closed 2024-12-02 21:26:50,723 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-02 21:26:50,723 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-02 21:26:50,723 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-02 21:26:50,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-02 21:26:50,812 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-02 21:26:50,814 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-02 21:26:50,815 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-02 21:26:50,816 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-02 21:26:50,817 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-02 21:26:50,817 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-02 21:26:50,818 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-02 21:26:50,818 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-02 21:26:50,819 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-02 21:26:50,819 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-02 21:26:50,820 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-02 21:26:50,820 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-02 21:26:50,820 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-02 21:26:50,821 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-02 21:26:50,821 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-02 21:26:50,821 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-02 21:26:50,822 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-02 21:26:50,825 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-02 21:26:50,825 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-02 21:26:50,826 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-02 21:26:50,826 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-02T21:26:51,070 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34 2024-12-02 21:26:51,073 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-02 21:26:51,073 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
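With the Log4j context reconfigured from the test jar's log4j2.properties, the run switches to HBase setup: the HBaseTestingUtil(323) line points hbase.rootdir at a per-run directory under target/test-data. The following is only an assumed sketch of what that utility call amounts to, not the actual HBaseTestingUtil code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;

// Sketch: how hbase.rootdir ends up under target/test-data/<random-id>, as in the
// "Setting hbase.rootdir to ..." line above. Class and flow are illustrative.
public final class RootDirSketch {
  public static void main(String[] args) {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Path testDir = util.getDataTestDir();               // e.g. .../target/test-data/4c84e7b4-...
    Configuration conf = util.getConfiguration();
    conf.set(HConstants.HBASE_DIR, testDir.toString()); // HConstants.HBASE_DIR == "hbase.rootdir"
    System.out.println(conf.get(HConstants.HBASE_DIR));
  }
}
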
2024-12-02T21:26:51,082 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-02T21:26:51,126 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=279, ProcessCount=11, AvailableMemoryMB=3668 2024-12-02T21:26:51,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:26:51,142 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc, deleteOnExit=true 2024-12-02T21:26:51,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T21:26:51,143 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/test.cache.data in system properties and HBase conf 2024-12-02T21:26:51,144 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:26:51,144 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:26:51,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:26:51,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:26:51,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T21:26:51,234 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-02T21:26:51,312 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T21:26:51,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:26:51,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:26:51,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:26:51,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:26:51,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:26:51,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:26:51,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:26:51,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:26:51,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:26:51,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:26:51,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:26:51,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:26:51,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:26:51,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:26:51,740 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:26:52,479 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-02T21:26:52,546 INFO [Time-limited test {}] log.Log(170): Logging initialized @2537ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-02T21:26:52,610 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:26:52,673 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:26:52,690 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:26:52,691 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:26:52,692 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:26:52,705 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:26:52,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:26:52,708 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:26:52,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/java.io.tmpdir/jetty-localhost-33239-hadoop-hdfs-3_4_1-tests_jar-_-any-4830339012322628892/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:26:52,897 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:33239} 2024-12-02T21:26:52,898 INFO [Time-limited test {}] server.Server(415): Started @2890ms 2024-12-02T21:26:52,919 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:26:53,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:26:53,440 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:26:53,440 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:26:53,441 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:26:53,441 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:26:53,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:26:53,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:26:53,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/java.io.tmpdir/jetty-localhost-39175-hadoop-hdfs-3_4_1-tests_jar-_-any-11155852175544172437/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:26:53,535 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:39175} 2024-12-02T21:26:53,536 INFO [Time-limited test {}] server.Server(415): Started @3528ms 2024-12-02T21:26:53,582 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:26:53,674 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:26:53,679 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:26:53,681 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:26:53,681 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:26:53,681 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:26:53,682 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:26:53,683 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:26:53,785 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/java.io.tmpdir/jetty-localhost-36827-hadoop-hdfs-3_4_1-tests_jar-_-any-10264309984122089851/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:26:53,786 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:36827} 2024-12-02T21:26:53,786 INFO [Time-limited test {}] server.Server(415): Started @3779ms 2024-12-02T21:26:53,788 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
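The Jetty servers above are the HTTP endpoints of the mini HDFS cluster (one namenode plus two datanodes), matching the StartMiniClusterOption logged earlier (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1). A minimal sketch of how a test typically requests that topology; the real call sites live in the test's setup method and inside HBaseTestingUtil:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Sketch reproducing the option string logged by HBaseTestingUtil(805) above.
public final class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);     // brings up DFS, ZooKeeper, master and region server
    try {
      // ... test logic, e.g. against util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
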
2024-12-02T21:26:54,761 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc/data/data3/current/BP-619167935-172.17.0.3-1733174811821/current, will proceed with Du for space computation calculation, 2024-12-02T21:26:54,761 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc/data/data4/current/BP-619167935-172.17.0.3-1733174811821/current, will proceed with Du for space computation calculation, 2024-12-02T21:26:54,761 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc/data/data2/current/BP-619167935-172.17.0.3-1733174811821/current, will proceed with Du for space computation calculation, 2024-12-02T21:26:54,761 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc/data/data1/current/BP-619167935-172.17.0.3-1733174811821/current, will proceed with Du for space computation calculation, 2024-12-02T21:26:54,795 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:26:54,795 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:26:54,839 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x177ba0615b25b62c with lease ID 0x30a212aeae8ed33f: Processing first storage report for DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de from datanode DatanodeRegistration(127.0.0.1:37419, datanodeUuid=9f6b0d9f-051f-4740-ad58-013776d1c904, infoPort=35645, infoSecurePort=0, ipcPort=40301, storageInfo=lv=-57;cid=testClusterID;nsid=1170731739;c=1733174811821) 2024-12-02T21:26:54,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x177ba0615b25b62c with lease ID 0x30a212aeae8ed33f: from storage DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de node DatanodeRegistration(127.0.0.1:37419, datanodeUuid=9f6b0d9f-051f-4740-ad58-013776d1c904, infoPort=35645, infoSecurePort=0, ipcPort=40301, storageInfo=lv=-57;cid=testClusterID;nsid=1170731739;c=1733174811821), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T21:26:54,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf967d7ce38cd3754 with lease ID 0x30a212aeae8ed33e: Processing first storage report for DS-1c94e1ee-5801-4061-9909-753fd4317f65 from datanode DatanodeRegistration(127.0.0.1:37999, datanodeUuid=d4213cd8-145b-4ac8-8e63-9e550960a701, infoPort=34859, infoSecurePort=0, ipcPort=39137, storageInfo=lv=-57;cid=testClusterID;nsid=1170731739;c=1733174811821) 2024-12-02T21:26:54,841 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf967d7ce38cd3754 with lease ID 0x30a212aeae8ed33e: from storage DS-1c94e1ee-5801-4061-9909-753fd4317f65 node DatanodeRegistration(127.0.0.1:37999, datanodeUuid=d4213cd8-145b-4ac8-8e63-9e550960a701, infoPort=34859, infoSecurePort=0, ipcPort=39137, storageInfo=lv=-57;cid=testClusterID;nsid=1170731739;c=1733174811821), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:26:54,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x177ba0615b25b62c with lease ID 0x30a212aeae8ed33f: Processing first storage report for DS-341a6547-bf89-4ace-963e-7a5b1bba143b from datanode DatanodeRegistration(127.0.0.1:37419, datanodeUuid=9f6b0d9f-051f-4740-ad58-013776d1c904, infoPort=35645, infoSecurePort=0, ipcPort=40301, storageInfo=lv=-57;cid=testClusterID;nsid=1170731739;c=1733174811821) 2024-12-02T21:26:54,841 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x177ba0615b25b62c with lease ID 0x30a212aeae8ed33f: from storage DS-341a6547-bf89-4ace-963e-7a5b1bba143b node DatanodeRegistration(127.0.0.1:37419, datanodeUuid=9f6b0d9f-051f-4740-ad58-013776d1c904, infoPort=35645, infoSecurePort=0, ipcPort=40301, storageInfo=lv=-57;cid=testClusterID;nsid=1170731739;c=1733174811821), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:26:54,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf967d7ce38cd3754 with lease ID 0x30a212aeae8ed33e: Processing first storage report for DS-fdcd3d72-6d76-4d73-a664-78eb9a40964d from datanode DatanodeRegistration(127.0.0.1:37999, datanodeUuid=d4213cd8-145b-4ac8-8e63-9e550960a701, infoPort=34859, infoSecurePort=0, ipcPort=39137, storageInfo=lv=-57;cid=testClusterID;nsid=1170731739;c=1733174811821) 2024-12-02T21:26:54,842 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xf967d7ce38cd3754 with lease ID 0x30a212aeae8ed33e: from storage DS-fdcd3d72-6d76-4d73-a664-78eb9a40964d node DatanodeRegistration(127.0.0.1:37999, datanodeUuid=d4213cd8-145b-4ac8-8e63-9e550960a701, infoPort=34859, infoSecurePort=0, ipcPort=39137, storageInfo=lv=-57;cid=testClusterID;nsid=1170731739;c=1733174811821), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:26:54,893 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34 2024-12-02T21:26:54,952 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc/zookeeper_0, clientPort=58312, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:26:54,961 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58312 2024-12-02T21:26:54,970 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:26:54,972 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:26:55,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:26:55,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:26:55,599 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2 with version=8 2024-12-02T21:26:55,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/hbase-staging 2024-12-02T21:26:55,674 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-02T21:26:55,900 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:26:55,909 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:26:55,909 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:26:55,914 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:26:55,915 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:26:55,915 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:26:56,026 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T21:26:56,075 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-02T21:26:56,082 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-02T21:26:56,085 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:26:56,105 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 74034 (auto-detected) 2024-12-02T21:26:56,106 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-12-02T21:26:56,121 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35249 2024-12-02T21:26:56,138 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35249 connecting to ZooKeeper ensemble=127.0.0.1:58312 2024-12-02T21:26:56,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:352490x0, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:26:56,306 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35249-0x10197f0e4850000 connected 2024-12-02T21:26:56,399 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:26:56,404 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:26:56,412 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:26:56,415 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2, hbase.cluster.distributed=false 2024-12-02T21:26:56,436 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:26:56,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35249 2024-12-02T21:26:56,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35249 2024-12-02T21:26:56,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35249 2024-12-02T21:26:56,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35249 2024-12-02T21:26:56,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35249 2024-12-02T21:26:56,548 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:26:56,550 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:26:56,550 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:26:56,551 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:26:56,551 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:26:56,551 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:26:56,554 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:26:56,557 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:26:56,558 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38553 2024-12-02T21:26:56,560 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38553 connecting to ZooKeeper ensemble=127.0.0.1:58312 2024-12-02T21:26:56,562 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:26:56,567 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:26:56,584 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385530x0, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:26:56,585 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38553-0x10197f0e4850001 connected 2024-12-02T21:26:56,585 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:26:56,590 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:26:56,599 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:26:56,603 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:26:56,609 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:26:56,610 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38553 2024-12-02T21:26:56,610 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38553 2024-12-02T21:26:56,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38553 2024-12-02T21:26:56,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38553 2024-12-02T21:26:56,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38553 2024-12-02T21:26:56,630 DEBUG [M:0;87c3fdb6c570:35249 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87c3fdb6c570:35249 2024-12-02T21:26:56,631 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87c3fdb6c570,35249,1733174815752 2024-12-02T21:26:56,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:26:56,648 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:26:56,650 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87c3fdb6c570,35249,1733174815752 2024-12-02T21:26:56,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:26:56,679 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:26:56,679 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-02T21:26:56,680 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:26:56,681 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87c3fdb6c570,35249,1733174815752 from backup master directory 2024-12-02T21:26:56,689 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:26:56,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87c3fdb6c570,35249,1733174815752 2024-12-02T21:26:56,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:26:56,690 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T21:26:56,691 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87c3fdb6c570,35249,1733174815752 2024-12-02T21:26:56,693 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-02T21:26:56,694 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-02T21:26:56,747 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/hbase.id] with ID: 31685697-1bd4-4883-89ec-279a73b8d67c 2024-12-02T21:26:56,747 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/.tmp/hbase.id 2024-12-02T21:26:56,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:26:56,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:26:56,759 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/.tmp/hbase.id]:[hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/hbase.id] 2024-12-02T21:26:56,800 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:26:56,805 INFO 
[master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T21:26:56,822 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-12-02T21:26:56,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:26:56,836 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:26:56,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:26:56,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:26:56,870 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:26:56,872 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T21:26:56,878 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:26:56,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:26:56,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:26:56,923 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store 2024-12-02T21:26:56,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:26:56,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:26:56,946 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-02T21:26:56,949 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:26:56,951 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:26:56,951 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:26:56,951 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:26:56,953 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:26:56,953 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
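The descriptor printed above is the master's local 'master:store' region (its families proc, rs and state are listed alongside info); the log shows the region being instantiated and then closed again as part of MasterRegion bootstrap before its WAL is created. As an assumed illustration of what the logged 'info' family attributes look like when expressed through the public client builder API (MasterRegion constructs its descriptor internally, so this is not the production code path):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: the 'info' family of the master:store descriptor logged above
// (VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, IN_MEMORY=true, 8 KB blocks).
public final class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build())
        .build();
  }
}
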
2024-12-02T21:26:56,953 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:26:56,955 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733174816950Disabling compacts and flushes for region at 1733174816950Disabling writes for close at 1733174816953 (+3 ms)Writing region close event to WAL at 1733174816953Closed at 1733174816953 2024-12-02T21:26:56,957 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/.initializing 2024-12-02T21:26:56,957 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/WALs/87c3fdb6c570,35249,1733174815752 2024-12-02T21:26:56,979 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C35249%2C1733174815752, suffix=, logDir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/WALs/87c3fdb6c570,35249,1733174815752, archiveDir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/oldWALs, maxLogs=10 2024-12-02T21:26:56,988 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C35249%2C1733174815752.1733174816984 2024-12-02T21:26:57,009 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/WALs/87c3fdb6c570,35249,1733174815752/87c3fdb6c570%2C35249%2C1733174815752.1733174816984 2024-12-02T21:26:57,016 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35645:35645),(127.0.0.1/127.0.0.1:34859:34859)] 2024-12-02T21:26:57,017 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:26:57,018 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:26:57,021 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,022 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,082 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:26:57,086 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:57,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:26:57,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:26:57,092 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:57,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:26:57,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:26:57,096 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:57,097 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:26:57,098 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,100 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:26:57,100 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:57,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:26:57,101 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,105 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,107 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,113 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,113 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,117 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:26:57,121 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:26:57,125 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:26:57,126 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766250, jitterRate=-0.025662854313850403}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:26:57,131 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733174817033Initializing all the Stores at 1733174817035 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174817036 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174817036Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174817037 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174817037Cleaning up temporary data from old regions at 1733174817113 (+76 ms)Region opened successfully at 1733174817131 (+18 ms) 2024-12-02T21:26:57,133 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:26:57,163 DEBUG 
[master/87c3fdb6c570:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3843f8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:26:57,188 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T21:26:57,197 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:26:57,197 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:26:57,200 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:26:57,201 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-02T21:26:57,205 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-12-02T21:26:57,205 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:26:57,226 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T21:26:57,233 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:26:57,279 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:26:57,283 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:26:57,286 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:26:57,299 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:26:57,301 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:26:57,306 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:26:57,310 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:26:57,312 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:26:57,321 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:26:57,341 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:26:57,352 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:26:57,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:26:57,363 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:26:57,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:26:57,363 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:26:57,367 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87c3fdb6c570,35249,1733174815752, sessionid=0x10197f0e4850000, setting cluster-up flag (Was=false) 2024-12-02T21:26:57,394 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:26:57,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:26:57,427 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:26:57,432 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,35249,1733174815752 2024-12-02T21:26:57,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:26:57,458 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:26:57,489 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing 
all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:26:57,491 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,35249,1733174815752 2024-12-02T21:26:57,502 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T21:26:57,518 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(746): ClusterId : 31685697-1bd4-4883-89ec-279a73b8d67c 2024-12-02T21:26:57,521 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:26:57,533 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:26:57,534 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:26:57,543 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:26:57,543 DEBUG [RS:0;87c3fdb6c570:38553 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cd6386f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:26:57,556 DEBUG [RS:0;87c3fdb6c570:38553 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87c3fdb6c570:38553 2024-12-02T21:26:57,559 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T21:26:57,559 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T21:26:57,559 DEBUG [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(832): About to register with Master. 
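
Every entry in this transcript follows the same shape: ISO-8601 timestamp, level, [thread {optional context}], class(lineNumber): message. A small, hedged parsing sketch for that shape is shown below; the regular expression is inferred from the entries in this section and is an assumption, not an official log format.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Minimal sketch: splitting one entry of this transcript into its fields.
// The pattern is inferred from the surrounding lines and is an assumption.
public class LogEntryParseSketch {
  private static final Pattern ENTRY = Pattern.compile(
      "^(\\S+)\\s+(TRACE|DEBUG|INFO|WARN|ERROR)\\s+\\[(.+?) \\{(.*?)\\}\\]\\s+([\\w.$]+)\\((\\d+)\\):\\s+(.*)$");

  public static void main(String[] args) {
    String line = "2024-12-02T21:26:57,561 INFO [RS:0;87c3fdb6c570:38553 {}] "
        + "regionserver.HRegionServer(2659): reportForDuty to master=87c3fdb6c570,35249,1733174815752";
    Matcher m = ENTRY.matcher(line);
    if (m.matches()) {
      System.out.println("time=" + m.group(1) + " level=" + m.group(2)
          + " thread=" + m.group(3) + " class=" + m.group(5)
          + " line=" + m.group(6) + " msg=" + m.group(7));
    }
  }
}
```
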
2024-12-02T21:26:57,561 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(2659): reportForDuty to master=87c3fdb6c570,35249,1733174815752 with port=38553, startcode=1733174816511 2024-12-02T21:26:57,568 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T21:26:57,571 DEBUG [RS:0;87c3fdb6c570:38553 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:26:57,576 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T21:26:57,582 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T21:26:57,589 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87c3fdb6c570,35249,1733174815752 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:26:57,598 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:26:57,598 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:26:57,599 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:26:57,599 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:26:57,599 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87c3fdb6c570:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:26:57,599 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,599 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:26:57,599 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,604 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:26:57,605 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:26:57,609 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733174847609 2024-12-02T21:26:57,610 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:57,611 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:26:57,611 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:26:57,613 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:26:57,616 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:26:57,617 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:26:57,618 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:26:57,618 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:26:57,620 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS 
is enabled. 2024-12-02T21:26:57,624 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:26:57,625 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:26:57,625 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:26:57,630 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:26:57,631 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:26:57,633 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174817632,5,FailOnTimeoutGroup] 2024-12-02T21:26:57,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:26:57,636 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T21:26:57,637 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2 2024-12-02T21:26:57,640 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174817633,5,FailOnTimeoutGroup] 2024-12-02T21:26:57,645 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:57,646 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:26:57,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:26:57,648 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:57,650 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:57,652 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51381, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:26:57,661 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35249 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87c3fdb6c570,38553,1733174816511 2024-12-02T21:26:57,664 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35249 {}] master.ServerManager(517): Registering regionserver=87c3fdb6c570,38553,1733174816511 2024-12-02T21:26:57,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:26:57,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:26:57,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:26:57,676 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:26:57,679 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:26:57,679 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:57,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:26:57,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:26:57,681 DEBUG [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2 2024-12-02T21:26:57,681 DEBUG [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43567 2024-12-02T21:26:57,681 DEBUG [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T21:26:57,684 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:26:57,684 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:57,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:26:57,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:26:57,688 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:26:57,688 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:57,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:26:57,689 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:26:57,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:26:57,690 DEBUG [RS:0;87c3fdb6c570:38553 {}] zookeeper.ZKUtil(111): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87c3fdb6c570,38553,1733174816511 2024-12-02T21:26:57,690 WARN [RS:0;87c3fdb6c570:38553 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T21:26:57,690 INFO [RS:0;87c3fdb6c570:38553 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:26:57,690 DEBUG [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511 2024-12-02T21:26:57,693 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:26:57,694 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:57,695 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:26:57,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:26:57,697 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87c3fdb6c570,38553,1733174816511] 2024-12-02T21:26:57,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740 
2024-12-02T21:26:57,698 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740 2024-12-02T21:26:57,701 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:26:57,701 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:26:57,702 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:26:57,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:26:57,709 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:26:57,710 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863348, jitterRate=0.09780476987361908}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:26:57,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733174817673Initializing all the Stores at 1733174817675 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174817675Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174817676 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174817676Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174817676Cleaning up temporary data from old regions at 1733174817701 (+25 ms)Region opened successfully at 1733174817713 (+12 ms) 2024-12-02T21:26:57,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:26:57,714 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:26:57,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): 
Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:26:57,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:26:57,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:26:57,716 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:26:57,717 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733174817714Disabling compacts and flushes for region at 1733174817714Disabling writes for close at 1733174817714Writing region close event to WAL at 1733174817716 (+2 ms)Closed at 1733174817716 2024-12-02T21:26:57,720 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:26:57,720 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T21:26:57,722 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:26:57,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:26:57,734 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:26:57,736 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:26:57,737 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:26:57,742 INFO [RS:0;87c3fdb6c570:38553 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:26:57,742 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:57,743 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T21:26:57,748 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T21:26:57,749 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-02T21:26:57,749 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,750 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,750 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,750 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,750 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,750 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:26:57,750 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,750 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,750 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,751 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,751 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,751 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:26:57,751 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:26:57,751 DEBUG [RS:0;87c3fdb6c570:38553 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:26:57,754 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:57,754 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:57,754 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:57,754 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
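
The region server entries above enable a series of ScheduledChore instances (CompactionChecker every 1000 ms, MemstoreFlusherChore every 1000 ms, ExecutorStatusChore every 60000 ms, and so on). As a hedged sketch, the same fixed-period pattern is expressed below with a plain ScheduledExecutorService, deliberately not HBase's ChoreService API; the names and periods are copied from the log, the task bodies are placeholders.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Minimal sketch: the fixed-period "chore" pattern from the log, using a plain
// ScheduledExecutorService. Names and periods come from the entries above.
public class ChorePatternSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();

    chores.scheduleAtFixedRate(
        () -> System.out.println("CompactionChecker tick"), 0, 1000, TimeUnit.MILLISECONDS);
    chores.scheduleAtFixedRate(
        () -> System.out.println("MemstoreFlusherChore tick"), 0, 1000, TimeUnit.MILLISECONDS);
    chores.scheduleAtFixedRate(
        () -> System.out.println("ExecutorStatusChore tick"), 0, 60_000, TimeUnit.MILLISECONDS);

    Thread.sleep(3_000);  // let a few ticks run
    chores.shutdownNow(); // stop the periodic tasks
  }
}
```
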
2024-12-02T21:26:57,755 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:57,755 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,38553,1733174816511-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:26:57,770 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:26:57,771 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,38553,1733174816511-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:57,772 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:57,772 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.Replication(171): 87c3fdb6c570,38553,1733174816511 started 2024-12-02T21:26:57,786 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:57,786 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(1482): Serving as 87c3fdb6c570,38553,1733174816511, RpcServer on 87c3fdb6c570/172.17.0.3:38553, sessionid=0x10197f0e4850001 2024-12-02T21:26:57,787 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:26:57,787 DEBUG [RS:0;87c3fdb6c570:38553 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87c3fdb6c570,38553,1733174816511 2024-12-02T21:26:57,788 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,38553,1733174816511' 2024-12-02T21:26:57,788 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:26:57,789 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:26:57,789 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:26:57,789 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:26:57,790 DEBUG [RS:0;87c3fdb6c570:38553 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87c3fdb6c570,38553,1733174816511 2024-12-02T21:26:57,790 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,38553,1733174816511' 2024-12-02T21:26:57,790 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:26:57,790 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:26:57,791 DEBUG [RS:0;87c3fdb6c570:38553 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:26:57,791 INFO [RS:0;87c3fdb6c570:38553 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:26:57,791 INFO [RS:0;87c3fdb6c570:38553 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-02T21:26:57,887 WARN [87c3fdb6c570:35249 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T21:26:57,906 INFO [RS:0;87c3fdb6c570:38553 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C38553%2C1733174816511, suffix=, logDir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511, archiveDir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/oldWALs, maxLogs=32 2024-12-02T21:26:57,908 INFO [RS:0;87c3fdb6c570:38553 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C38553%2C1733174816511.1733174817908 2024-12-02T21:26:57,917 INFO [RS:0;87c3fdb6c570:38553 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174817908 2024-12-02T21:26:57,920 DEBUG [RS:0;87c3fdb6c570:38553 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34859:34859),(127.0.0.1/127.0.0.1:35645:35645)] 2024-12-02T21:26:58,143 DEBUG [87c3fdb6c570:35249 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:26:58,156 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87c3fdb6c570,38553,1733174816511 2024-12-02T21:26:58,161 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,38553,1733174816511, state=OPENING 2024-12-02T21:26:58,216 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:26:58,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:26:58,301 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:26:58,303 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:26:58,304 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:26:58,306 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:26:58,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,38553,1733174816511}] 2024-12-02T21:26:58,484 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:26:58,487 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33933, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:26:58,497 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T21:26:58,497 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:26:58,500 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C38553%2C1733174816511.meta, suffix=.meta, logDir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511, archiveDir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/oldWALs, maxLogs=32 2024-12-02T21:26:58,502 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C38553%2C1733174816511.meta.1733174818501.meta 2024-12-02T21:26:58,509 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.meta.1733174818501.meta 2024-12-02T21:26:58,510 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34859:34859),(127.0.0.1/127.0.0.1:35645:35645)] 2024-12-02T21:26:58,512 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:26:58,513 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:26:58,515 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:26:58,520 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-02T21:26:58,524 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:26:58,525 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:26:58,525 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T21:26:58,525 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T21:26:58,528 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:26:58,529 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:26:58,530 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:58,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:26:58,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:26:58,532 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:26:58,532 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:58,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:26:58,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:26:58,534 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:26:58,534 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:58,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:26:58,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:26:58,537 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:26:58,537 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:58,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-02T21:26:58,538 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:26:58,539 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740 2024-12-02T21:26:58,541 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740 2024-12-02T21:26:58,544 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:26:58,544 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:26:58,545 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:26:58,547 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:26:58,549 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797631, jitterRate=0.014240309596061707}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:26:58,549 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T21:26:58,550 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733174818525Writing region info on filesystem at 1733174818526 (+1 ms)Initializing all the Stores at 1733174818527 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174818528 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174818528Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174818528Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174818528Cleaning up temporary data from old regions at 1733174818544 (+16 ms)Running coprocessor post-open hooks at 1733174818549 (+5 ms)Region opened successfully at 1733174818550 (+1 ms) 2024-12-02T21:26:58,556 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733174818475 2024-12-02T21:26:58,566 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:26:58,567 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T21:26:58,568 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87c3fdb6c570,38553,1733174816511 2024-12-02T21:26:58,570 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,38553,1733174816511, state=OPEN 2024-12-02T21:26:58,704 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:26:58,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:26:58,704 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:26:58,704 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:26:58,704 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87c3fdb6c570,38553,1733174816511 2024-12-02T21:26:58,712 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:26:58,713 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,38553,1733174816511 in 397 msec 2024-12-02T21:26:58,721 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:26:58,721 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 988 msec 2024-12-02T21:26:58,723 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:26:58,723 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T21:26:58,745 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:26:58,746 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,38553,1733174816511, seqNum=-1] 2024-12-02T21:26:58,762 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:26:58,765 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37219, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:26:58,783 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2530 sec 2024-12-02T21:26:58,784 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733174818783, completionTime=-1 2024-12-02T21:26:58,786 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:26:58,786 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T21:26:58,810 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T21:26:58,810 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733174878810 2024-12-02T21:26:58,811 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733174938811 2024-12-02T21:26:58,811 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 24 msec 2024-12-02T21:26:58,813 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35249,1733174815752-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:58,814 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35249,1733174815752-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:58,814 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35249,1733174815752-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:58,815 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87c3fdb6c570:35249, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:26:58,816 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:58,816 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T21:26:58,823 DEBUG [master/87c3fdb6c570:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T21:26:58,841 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.150sec 2024-12-02T21:26:58,842 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:26:58,844 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:26:58,845 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:26:58,845 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T21:26:58,846 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:26:58,846 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35249,1733174815752-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:26:58,847 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35249,1733174815752-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:26:58,857 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:26:58,858 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:26:58,859 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35249,1733174815752-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:26:58,927 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:26:58,929 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-02T21:26:58,930 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-02T21:26:58,932 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87c3fdb6c570,35249,-1 for getting cluster id 2024-12-02T21:26:58,935 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T21:26:58,942 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '31685697-1bd4-4883-89ec-279a73b8d67c' 2024-12-02T21:26:58,945 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T21:26:58,945 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "31685697-1bd4-4883-89ec-279a73b8d67c" 2024-12-02T21:26:58,947 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b177d24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:26:58,947 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87c3fdb6c570,35249,-1] 2024-12-02T21:26:58,949 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T21:26:58,951 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:26:58,953 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40780, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T21:26:58,955 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:26:58,956 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:26:58,965 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,38553,1733174816511, seqNum=-1] 2024-12-02T21:26:58,965 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:26:58,968 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38654, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:26:58,986 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=87c3fdb6c570,35249,1733174815752 2024-12-02T21:26:58,987 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:26:58,995 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T21:26:59,000 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T21:26:59,005 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 87c3fdb6c570,35249,1733174815752 2024-12-02T21:26:59,007 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7e9fb4b8 2024-12-02T21:26:59,008 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T21:26:59,010 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40794, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T21:26:59,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35249 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T21:26:59,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35249 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-02T21:26:59,015 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35249 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:26:59,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35249 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-02T21:26:59,024 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:26:59,026 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35249 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-02T21:26:59,026 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:59,029 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:26:59,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35249 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T21:26:59,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741835_1011 (size=389) 2024-12-02T21:26:59,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741835_1011 (size=389) 2024-12-02T21:26:59,091 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9f5c0f572cef7abb2b21386017c7f8f2, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2 2024-12-02T21:26:59,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741836_1012 (size=72) 2024-12-02T21:26:59,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741836_1012 (size=72) 2024-12-02T21:26:59,102 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:26:59,102 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 9f5c0f572cef7abb2b21386017c7f8f2, disabling compactions & flushes 2024-12-02T21:26:59,102 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. 2024-12-02T21:26:59,102 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. 2024-12-02T21:26:59,102 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. after waiting 0 ms 2024-12-02T21:26:59,102 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. 2024-12-02T21:26:59,102 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. 2024-12-02T21:26:59,102 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9f5c0f572cef7abb2b21386017c7f8f2: Waiting for close lock at 1733174819102Disabling compacts and flushes for region at 1733174819102Disabling writes for close at 1733174819102Writing region close event to WAL at 1733174819102Closed at 1733174819102 2024-12-02T21:26:59,104 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:26:59,108 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733174819104"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733174819104"}]},"ts":"1733174819104"} 2024-12-02T21:26:59,112 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-02T21:26:59,114 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:26:59,116 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733174819114"}]},"ts":"1733174819114"} 2024-12-02T21:26:59,121 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-02T21:26:59,123 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9f5c0f572cef7abb2b21386017c7f8f2, ASSIGN}] 2024-12-02T21:26:59,126 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9f5c0f572cef7abb2b21386017c7f8f2, ASSIGN 2024-12-02T21:26:59,128 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9f5c0f572cef7abb2b21386017c7f8f2, ASSIGN; state=OFFLINE, location=87c3fdb6c570,38553,1733174816511; forceNewPlan=false, retain=false 2024-12-02T21:26:59,281 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9f5c0f572cef7abb2b21386017c7f8f2, regionState=OPENING, regionLocation=87c3fdb6c570,38553,1733174816511 2024-12-02T21:26:59,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9f5c0f572cef7abb2b21386017c7f8f2, ASSIGN because future has completed 2024-12-02T21:26:59,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9f5c0f572cef7abb2b21386017c7f8f2, server=87c3fdb6c570,38553,1733174816511}] 2024-12-02T21:26:59,456 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. 
2024-12-02T21:26:59,456 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9f5c0f572cef7abb2b21386017c7f8f2, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:26:59,457 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:26:59,457 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:26:59,457 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:26:59,457 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:26:59,460 INFO [StoreOpener-9f5c0f572cef7abb2b21386017c7f8f2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:26:59,463 INFO [StoreOpener-9f5c0f572cef7abb2b21386017c7f8f2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9f5c0f572cef7abb2b21386017c7f8f2 columnFamilyName info 2024-12-02T21:26:59,464 DEBUG [StoreOpener-9f5c0f572cef7abb2b21386017c7f8f2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:26:59,465 INFO [StoreOpener-9f5c0f572cef7abb2b21386017c7f8f2-1 {}] regionserver.HStore(327): Store=9f5c0f572cef7abb2b21386017c7f8f2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:26:59,465 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:26:59,466 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:26:59,467 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:26:59,467 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:26:59,467 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:26:59,470 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:26:59,473 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:26:59,474 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9f5c0f572cef7abb2b21386017c7f8f2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788742, jitterRate=0.002937823534011841}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:26:59,474 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:26:59,475 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9f5c0f572cef7abb2b21386017c7f8f2: Running coprocessor pre-open hook at 1733174819458Writing region info on filesystem at 1733174819458Initializing all the Stores at 1733174819460 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174819460Cleaning up temporary data from old regions at 1733174819467 (+7 ms)Running coprocessor post-open hooks at 1733174819474 (+7 ms)Region opened successfully at 1733174819475 (+1 ms) 2024-12-02T21:26:59,477 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2., pid=6, masterSystemTime=1733174819447 2024-12-02T21:26:59,480 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. 2024-12-02T21:26:59,480 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. 2024-12-02T21:26:59,481 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9f5c0f572cef7abb2b21386017c7f8f2, regionState=OPEN, openSeqNum=2, regionLocation=87c3fdb6c570,38553,1733174816511 2024-12-02T21:26:59,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9f5c0f572cef7abb2b21386017c7f8f2, server=87c3fdb6c570,38553,1733174816511 because future has completed 2024-12-02T21:26:59,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T21:26:59,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9f5c0f572cef7abb2b21386017c7f8f2, server=87c3fdb6c570,38553,1733174816511 in 196 msec 2024-12-02T21:26:59,495 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T21:26:59,495 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9f5c0f572cef7abb2b21386017c7f8f2, ASSIGN in 368 msec 2024-12-02T21:26:59,496 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:26:59,497 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733174819497"}]},"ts":"1733174819497"} 2024-12-02T21:26:59,500 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-02T21:26:59,502 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:26:59,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 485 msec 2024-12-02T21:27:04,038 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T21:27:04,079 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T21:27:04,081 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-02T21:27:06,072 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T21:27:06,073 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T21:27:06,076 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-02T21:27:06,076 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-02T21:27:06,078 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:27:06,079 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T21:27:06,079 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T21:27:06,079 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T21:27:09,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35249 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T21:27:09,083 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-02T21:27:09,085 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-02T21:27:09,091 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-02T21:27:09,091 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. 
2024-12-02T21:27:09,092 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C38553%2C1733174816511.1733174829092
2024-12-02T21:27:09,100 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:27:09,100 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:27:09,100 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:27:09,100 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:27:09,101 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:27:09,101 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174817908 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174829092
2024-12-02T21:27:09,104 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35645:35645),(127.0.0.1/127.0.0.1:34859:34859)]
2024-12-02T21:27:09,104 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174817908 is not closed yet, will try archiving it next time
2024-12-02T21:27:09,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741833_1009 (size=451)
2024-12-02T21:27:09,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741833_1009 (size=451)
2024-12-02T21:27:09,113 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2., hostname=87c3fdb6c570,38553,1733174816511, seqNum=2]
2024-12-02T21:27:09,507 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174817908 to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/oldWALs/87c3fdb6c570%2C38553%2C1733174816511.1733174817908
2024-12-02T21:27:21,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38553 {}] regionserver.HRegion(8855): Flush requested on 9f5c0f572cef7abb2b21386017c7f8f2
2024-12-02T21:27:21,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9f5c0f572cef7abb2b21386017c7f8f2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-02T21:27:21,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/60d3f184185147e885febe9813e59f43 is 1080, key is row0001/info:/1733174829116/Put/seqid=0
2024-12-02T21:27:21,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741838_1014 (size=12509)
2024-12-02T21:27:21,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741838_1014 (size=12509)
2024-12-02T21:27:21,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/60d3f184185147e885febe9813e59f43
2024-12-02T21:27:21,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/60d3f184185147e885febe9813e59f43 as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/60d3f184185147e885febe9813e59f43
2024-12-02T21:27:21,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/60d3f184185147e885febe9813e59f43, entries=7, sequenceid=11, filesize=12.2 K
2024-12-02T21:27:21,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9f5c0f572cef7abb2b21386017c7f8f2 in 162ms, sequenceid=11, compaction requested=false
2024-12-02T21:27:21,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9f5c0f572cef7abb2b21386017c7f8f2:
2024-12-02T21:27:24,890 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-02T21:27:29,161 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C38553%2C1733174816511.1733174849161 2024-12-02T21:27:29,413 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 249 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK], DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK]] 2024-12-02T21:27:29,413 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:29,413 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:29,414 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:29,414 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:29,414 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:29,414 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174829092 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174849161 2024-12-02T21:27:29,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741837_1013 (size=12399) 2024-12-02T21:27:29,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741837_1013 (size=12399) 2024-12-02T21:27:29,420 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34859:34859),(127.0.0.1/127.0.0.1:35645:35645)] 2024-12-02T21:27:29,420 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174829092 is not closed yet, will try archiving it next time 2024-12-02T21:27:29,624 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:31,828 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:34,033 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:36,237 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:36,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38553 {}] regionserver.HRegion(8855): Flush requested on 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:27:36,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9f5c0f572cef7abb2b21386017c7f8f2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:27:36,439 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:36,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/01703a0a545f4ca5b3617e8f652d0ce4 is 1080, key is row0008/info:/1733174843149/Put/seqid=0 2024-12-02T21:27:36,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741840_1016 (size=12509) 2024-12-02T21:27:36,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741840_1016 (size=12509) 2024-12-02T21:27:36,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/01703a0a545f4ca5b3617e8f652d0ce4 2024-12-02T21:27:36,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/01703a0a545f4ca5b3617e8f652d0ce4 as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/01703a0a545f4ca5b3617e8f652d0ce4 2024-12-02T21:27:36,478 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/01703a0a545f4ca5b3617e8f652d0ce4, entries=7, sequenceid=21, filesize=12.2 K 2024-12-02T21:27:36,684 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 204 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:36,684 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9f5c0f572cef7abb2b21386017c7f8f2 in 
447ms, sequenceid=21, compaction requested=false 2024-12-02T21:27:36,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9f5c0f572cef7abb2b21386017c7f8f2: 2024-12-02T21:27:36,685 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-02T21:27:36,685 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:27:36,687 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/60d3f184185147e885febe9813e59f43 because midkey is the same as first or last row 2024-12-02T21:27:38,442 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:38,887 INFO [master/87c3fdb6c570:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T21:27:38,887 INFO [master/87c3fdb6c570:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T21:27:40,646 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:40,649 WARN [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:40,650 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C38553%2C1733174816511:(num 1733174849161) roll requested 2024-12-02T21:27:40,651 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C38553%2C1733174816511.1733174860651 2024-12-02T21:27:40,858 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:40,859 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:40,859 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:40,859 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:40,859 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:40,859 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-12-02T21:27:40,860 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174849161 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174860651 2024-12-02T21:27:40,861 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34859:34859),(127.0.0.1/127.0.0.1:35645:35645)] 2024-12-02T21:27:40,861 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174849161 is not closed yet, will try archiving it next time 2024-12-02T21:27:40,861 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174829092 to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/oldWALs/87c3fdb6c570%2C38553%2C1733174816511.1733174829092 2024-12-02T21:27:40,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741839_1015 (size=7739) 2024-12-02T21:27:40,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741839_1015 (size=7739) 2024-12-02T21:27:42,851 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:44,457 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9f5c0f572cef7abb2b21386017c7f8f2, had cached 0 bytes from a total of 25018 2024-12-02T21:27:45,055 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:47,259 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:49,463 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], 
DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:51,465 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T21:27:51,465 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C38553%2C1733174816511.1733174871465 2024-12-02T21:27:54,891 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T21:27:56,474 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:56,476 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK], DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK]] 2024-12-02T21:27:56,476 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C38553%2C1733174816511:(num 1733174871465) roll requested 2024-12-02T21:27:56,476 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:56,476 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:56,476 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:56,477 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:56,477 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:27:56,477 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174860651 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174871465 2024-12-02T21:27:56,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741841_1017 (size=4753) 2024-12-02T21:27:56,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741841_1017 (size=4753) 2024-12-02T21:27:56,485 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35645:35645),(127.0.0.1/127.0.0.1:34859:34859)] 2024-12-02T21:27:56,486 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C38553%2C1733174816511.1733174876486 2024-12-02T21:28:01,489 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK], DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK]] 2024-12-02T21:28:01,489 WARN 
[FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK], DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK]] 2024-12-02T21:28:01,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38553 {}] regionserver.HRegion(8855): Flush requested on 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:28:01,489 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9f5c0f572cef7abb2b21386017c7f8f2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:28:01,496 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK], DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK]] 2024-12-02T21:28:01,497 WARN [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK], DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK]] 2024-12-02T21:28:03,490 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T21:28:06,492 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK], DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK]] 2024-12-02T21:28:06,492 WARN [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK], DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK]] 2024-12-02T21:28:06,492 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:06,492 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:06,492 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:06,492 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:06,492 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:06,493 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174871465 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174876486 2024-12-02T21:28:06,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741842_1018 (size=1569) 
2024-12-02T21:28:06,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741842_1018 (size=1569) 2024-12-02T21:28:06,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/6c9deeb654cf48a2bd676e3d9113acac is 1080, key is row0015/info:/1733174858240/Put/seqid=0 2024-12-02T21:28:06,508 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35645:35645),(127.0.0.1/127.0.0.1:34859:34859)] 2024-12-02T21:28:06,508 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174871465 is not closed yet, will try archiving it next time 2024-12-02T21:28:06,509 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C38553%2C1733174816511:(num 1733174876486) roll requested 2024-12-02T21:28:06,509 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C38553%2C1733174816511.1733174886509 2024-12-02T21:28:06,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741844_1020 (size=12509) 2024-12-02T21:28:06,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741844_1020 (size=12509) 2024-12-02T21:28:06,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/6c9deeb654cf48a2bd676e3d9113acac 2024-12-02T21:28:06,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/6c9deeb654cf48a2bd676e3d9113acac as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/6c9deeb654cf48a2bd676e3d9113acac 2024-12-02T21:28:06,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/6c9deeb654cf48a2bd676e3d9113acac, entries=7, sequenceid=31, filesize=12.2 K 2024-12-02T21:28:11,518 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK], DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK]] 2024-12-02T21:28:11,518 WARN [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current 
pipeline: [DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK], DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK]] 2024-12-02T21:28:11,552 INFO [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK], DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK]] 2024-12-02T21:28:11,552 WARN [FSHLog-0-hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2-prefix:87c3fdb6c570,38553,1733174816511 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37419,DS-fb0e30c7-ec59-414c-8df8-f102bfe5a1de,DISK], DatanodeInfoWithStorage[127.0.0.1:37999,DS-1c94e1ee-5801-4061-9909-753fd4317f65,DISK]] 2024-12-02T21:28:11,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9f5c0f572cef7abb2b21386017c7f8f2 in 10063ms, sequenceid=31, compaction requested=true 2024-12-02T21:28:11,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9f5c0f572cef7abb2b21386017c7f8f2: 2024-12-02T21:28:11,552 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,552 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,552 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-02T21:28:11,552 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:28:11,553 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,553 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/60d3f184185147e885febe9813e59f43 because midkey is the same as first or last row 2024-12-02T21:28:11,553 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,553 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,553 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174876486 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174886509 2024-12-02T21:28:11,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f5c0f572cef7abb2b21386017c7f8f2:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:28:11,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741843_1019 (size=438) 2024-12-02T21:28:11,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:28:11,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741843_1019 (size=438) 2024-12-02T21:28:11,556 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:28:11,557 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174849161 to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/oldWALs/87c3fdb6c570%2C38553%2C1733174816511.1733174849161 2024-12-02T21:28:11,558 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34859:34859),(127.0.0.1/127.0.0.1:35645:35645)] 2024-12-02T21:28:11,558 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C38553%2C1733174816511:(num 1733174891558) roll requested 2024-12-02T21:28:11,558 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C38553%2C1733174816511.1733174891558 2024-12-02T21:28:11,562 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174860651 to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/oldWALs/87c3fdb6c570%2C38553%2C1733174816511.1733174860651 2024-12-02T21:28:11,563 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:28:11,564 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174871465 to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/oldWALs/87c3fdb6c570%2C38553%2C1733174816511.1733174871465 2024-12-02T21:28:11,565 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.HStore(1541): 9f5c0f572cef7abb2b21386017c7f8f2/info is initiating minor compaction (all files) 2024-12-02T21:28:11,566 INFO [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9f5c0f572cef7abb2b21386017c7f8f2/info in TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. 
2024-12-02T21:28:11,566 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174876486 to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/oldWALs/87c3fdb6c570%2C38553%2C1733174816511.1733174876486 2024-12-02T21:28:11,566 INFO [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/60d3f184185147e885febe9813e59f43, hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/01703a0a545f4ca5b3617e8f652d0ce4, hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/6c9deeb654cf48a2bd676e3d9113acac] into tmpdir=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp, totalSize=36.6 K 2024-12-02T21:28:11,568 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] compactions.Compactor(225): Compacting 60d3f184185147e885febe9813e59f43, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733174829116 2024-12-02T21:28:11,569 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] compactions.Compactor(225): Compacting 01703a0a545f4ca5b3617e8f652d0ce4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733174843149 2024-12-02T21:28:11,570 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6c9deeb654cf48a2bd676e3d9113acac, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733174858240 2024-12-02T21:28:11,578 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,578 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,579 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,579 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,579 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,579 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174886509 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174891558 2024-12-02T21:28:11,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741845_1021 (size=93) 2024-12-02T21:28:11,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741845_1021 (size=93) 2024-12-02T21:28:11,582 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174886509 to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/oldWALs/87c3fdb6c570%2C38553%2C1733174816511.1733174886509 2024-12-02T21:28:11,583 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34859:34859),(127.0.0.1/127.0.0.1:35645:35645)] 2024-12-02T21:28:11,583 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C38553%2C1733174816511.1733174891583 2024-12-02T21:28:11,606 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,606 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,606 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,606 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,606 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:11,607 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174891558 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174891583 2024-12-02T21:28:11,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741846_1022 (size=1258) 2024-12-02T21:28:11,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741846_1022 (size=1258) 2024-12-02T21:28:11,609 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35645:35645),(127.0.0.1/127.0.0.1:34859:34859)] 2024-12-02T21:28:11,609 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/WALs/87c3fdb6c570,38553,1733174816511/87c3fdb6c570%2C38553%2C1733174816511.1733174891558 is not closed yet, will try archiving it next time 2024-12-02T21:28:11,618 INFO [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f5c0f572cef7abb2b21386017c7f8f2#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:28:11,619 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/14f81159d66743aaacb99f48c03cc3fc is 1080, key is row0001/info:/1733174829116/Put/seqid=0 2024-12-02T21:28:11,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741848_1024 (size=27710) 2024-12-02T21:28:11,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741848_1024 (size=27710) 2024-12-02T21:28:11,638 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/14f81159d66743aaacb99f48c03cc3fc as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/14f81159d66743aaacb99f48c03cc3fc 2024-12-02T21:28:11,657 INFO [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9f5c0f572cef7abb2b21386017c7f8f2/info of 9f5c0f572cef7abb2b21386017c7f8f2 into 14f81159d66743aaacb99f48c03cc3fc(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:28:11,657 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9f5c0f572cef7abb2b21386017c7f8f2: 2024-12-02T21:28:11,660 INFO [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2., storeName=9f5c0f572cef7abb2b21386017c7f8f2/info, priority=13, startTime=1733174891554; duration=0sec 2024-12-02T21:28:11,660 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-02T21:28:11,660 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:28:11,660 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/14f81159d66743aaacb99f48c03cc3fc because midkey is the same as first or last row 2024-12-02T21:28:11,661 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-02T21:28:11,661 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:28:11,661 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/14f81159d66743aaacb99f48c03cc3fc because midkey is the same as first or last row 2024-12-02T21:28:11,661 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-02T21:28:11,661 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:28:11,661 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/14f81159d66743aaacb99f48c03cc3fc because midkey is the same as first or last row 2024-12-02T21:28:11,661 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:28:11,662 DEBUG [RS:0;87c3fdb6c570:38553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f5c0f572cef7abb2b21386017c7f8f2:info 2024-12-02T21:28:23,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38553 {}] regionserver.HRegion(8855): Flush requested on 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:28:23,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9f5c0f572cef7abb2b21386017c7f8f2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:28:23,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/4685d4a3042b4c918073d1066827e24b is 1080, key is row0022/info:/1733174891585/Put/seqid=0 2024-12-02T21:28:23,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741849_1025 (size=12509) 2024-12-02T21:28:23,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741849_1025 (size=12509) 2024-12-02T21:28:23,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/4685d4a3042b4c918073d1066827e24b 2024-12-02T21:28:23,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/4685d4a3042b4c918073d1066827e24b as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/4685d4a3042b4c918073d1066827e24b 2024-12-02T21:28:23,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/4685d4a3042b4c918073d1066827e24b, entries=7, sequenceid=42, filesize=12.2 K 2024-12-02T21:28:23,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9f5c0f572cef7abb2b21386017c7f8f2 in 42ms, sequenceid=42, compaction requested=false 2024-12-02T21:28:23,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9f5c0f572cef7abb2b21386017c7f8f2: 2024-12-02T21:28:23,649 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-02T21:28:23,649 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:28:23,649 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/14f81159d66743aaacb99f48c03cc3fc because midkey is the same as first or last row 2024-12-02T21:28:24,891 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T21:28:29,458 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9f5c0f572cef7abb2b21386017c7f8f2, had cached 0 bytes from a total of 40219 2024-12-02T21:28:31,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T21:28:31,621 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T21:28:31,621 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:28:31,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:28:31,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:28:31,627 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T21:28:31,628 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:28:31,628 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1188070929, stopped=false 2024-12-02T21:28:31,628 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87c3fdb6c570,35249,1733174815752 2024-12-02T21:28:31,895 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:28:31,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:28:31,895 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:31,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:31,895 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:28:31,896 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T21:28:31,896 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:28:31,896 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:28:31,896 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:28:31,896 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:28:31,896 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87c3fdb6c570,38553,1733174816511' ***** 2024-12-02T21:28:31,896 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T21:28:31,897 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:28:31,897 INFO [RS:0;87c3fdb6c570:38553 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:28:31,897 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T21:28:31,897 INFO [RS:0;87c3fdb6c570:38553 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:28:31,897 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(3091): Received CLOSE for 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:28:31,898 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(959): stopping server 87c3fdb6c570,38553,1733174816511 2024-12-02T21:28:31,898 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:28:31,898 INFO [RS:0;87c3fdb6c570:38553 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87c3fdb6c570:38553. 
2024-12-02T21:28:31,898 DEBUG [RS:0;87c3fdb6c570:38553 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
  at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
  at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
  at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
  at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-02T21:28:31,898 DEBUG [RS:0;87c3fdb6c570:38553 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T21:28:31,898 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-02T21:28:31,898 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9f5c0f572cef7abb2b21386017c7f8f2, disabling compactions & flushes
2024-12-02T21:28:31,898 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-02T21:28:31,898 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.
2024-12-02T21:28:31,898 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-02T21:28:31,898 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.
2024-12-02T21:28:31,898 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-02T21:28:31,898 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. after waiting 0 ms
2024-12-02T21:28:31,898 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.
2024-12-02T21:28:31,899 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T21:28:31,899 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 9f5c0f572cef7abb2b21386017c7f8f2 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-02T21:28:31,899 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:28:31,899 DEBUG [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 9f5c0f572cef7abb2b21386017c7f8f2=TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.} 2024-12-02T21:28:31,899 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:28:31,899 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:28:31,899 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:28:31,899 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:28:31,899 DEBUG [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9f5c0f572cef7abb2b21386017c7f8f2 2024-12-02T21:28:31,899 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-02T21:28:31,905 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/8ff272d256a548099873917cc85619c6 is 1080, key is row0029/info:/1733174905610/Put/seqid=0 2024-12-02T21:28:31,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741850_1026 (size=8193) 2024-12-02T21:28:31,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741850_1026 (size=8193) 2024-12-02T21:28:31,916 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/8ff272d256a548099873917cc85619c6 2024-12-02T21:28:31,920 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/.tmp/info/c33f4e6633244edb836ef9798211d21e is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2./info:regioninfo/1733174819481/Put/seqid=0 2024-12-02T21:28:31,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741851_1027 (size=7016) 2024-12-02T21:28:31,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741851_1027 (size=7016) 2024-12-02T21:28:31,927 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/.tmp/info/8ff272d256a548099873917cc85619c6 as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/8ff272d256a548099873917cc85619c6 2024-12-02T21:28:31,928 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/.tmp/info/c33f4e6633244edb836ef9798211d21e 2024-12-02T21:28:31,936 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/8ff272d256a548099873917cc85619c6, entries=3, sequenceid=48, filesize=8.0 K 2024-12-02T21:28:31,938 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9f5c0f572cef7abb2b21386017c7f8f2 in 38ms, sequenceid=48, compaction requested=true 2024-12-02T21:28:31,938 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/60d3f184185147e885febe9813e59f43, hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/01703a0a545f4ca5b3617e8f652d0ce4, hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/6c9deeb654cf48a2bd676e3d9113acac] to archive 2024-12-02T21:28:31,942 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T21:28:31,946 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/60d3f184185147e885febe9813e59f43 to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/60d3f184185147e885febe9813e59f43 2024-12-02T21:28:31,948 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/01703a0a545f4ca5b3617e8f652d0ce4 to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/01703a0a545f4ca5b3617e8f652d0ce4 2024-12-02T21:28:31,950 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/6c9deeb654cf48a2bd676e3d9113acac to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/info/6c9deeb654cf48a2bd676e3d9113acac 2024-12-02T21:28:31,954 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/.tmp/ns/21ae832db47041acb0ce5bbeafd57281 is 43, key is default/ns:d/1733174818769/Put/seqid=0 2024-12-02T21:28:31,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741852_1028 (size=5153) 2024-12-02T21:28:31,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741852_1028 (size=5153) 2024-12-02T21:28:31,961 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/.tmp/ns/21ae832db47041acb0ce5bbeafd57281 2024-12-02T21:28:31,966 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=87c3fdb6c570:35249 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
  at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
  at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
  at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
  at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
  at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
  at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
  at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
  at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  ...
16 more 2024-12-02T21:28:31,972 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [60d3f184185147e885febe9813e59f43=12509, 01703a0a545f4ca5b3617e8f652d0ce4=12509, 6c9deeb654cf48a2bd676e3d9113acac=12509] 2024-12-02T21:28:31,979 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/default/TestLogRolling-testSlowSyncLogRolling/9f5c0f572cef7abb2b21386017c7f8f2/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-02T21:28:31,981 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. 2024-12-02T21:28:31,982 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9f5c0f572cef7abb2b21386017c7f8f2: Waiting for close lock at 1733174911898Running coprocessor pre-close hooks at 1733174911898Disabling compacts and flushes for region at 1733174911898Disabling writes for close at 1733174911898Obtaining lock to block concurrent updates at 1733174911899 (+1 ms)Preparing flush snapshotting stores in 9f5c0f572cef7abb2b21386017c7f8f2 at 1733174911899Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733174911899Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. at 1733174911900 (+1 ms)Flushing 9f5c0f572cef7abb2b21386017c7f8f2/info: creating writer at 1733174911900Flushing 9f5c0f572cef7abb2b21386017c7f8f2/info: appending metadata at 1733174911904 (+4 ms)Flushing 9f5c0f572cef7abb2b21386017c7f8f2/info: closing flushed file at 1733174911904Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@309d321c: reopening flushed file at 1733174911925 (+21 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9f5c0f572cef7abb2b21386017c7f8f2 in 38ms, sequenceid=48, compaction requested=true at 1733174911938 (+13 ms)Writing region close event to WAL at 1733174911974 (+36 ms)Running coprocessor post-close hooks at 1733174911980 (+6 ms)Closed at 1733174911981 (+1 ms) 2024-12-02T21:28:31,982 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733174819011.9f5c0f572cef7abb2b21386017c7f8f2. 
2024-12-02T21:28:31,989 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/.tmp/table/fd179346f84c493ab21a976fcdfa3e4e is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733174819497/Put/seqid=0 2024-12-02T21:28:31,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741853_1029 (size=5396) 2024-12-02T21:28:31,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741853_1029 (size=5396) 2024-12-02T21:28:31,995 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/.tmp/table/fd179346f84c493ab21a976fcdfa3e4e 2024-12-02T21:28:32,003 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/.tmp/info/c33f4e6633244edb836ef9798211d21e as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/info/c33f4e6633244edb836ef9798211d21e 2024-12-02T21:28:32,010 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/info/c33f4e6633244edb836ef9798211d21e, entries=10, sequenceid=11, filesize=6.9 K 2024-12-02T21:28:32,011 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/.tmp/ns/21ae832db47041acb0ce5bbeafd57281 as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/ns/21ae832db47041acb0ce5bbeafd57281 2024-12-02T21:28:32,019 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/ns/21ae832db47041acb0ce5bbeafd57281, entries=2, sequenceid=11, filesize=5.0 K 2024-12-02T21:28:32,020 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/.tmp/table/fd179346f84c493ab21a976fcdfa3e4e as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/table/fd179346f84c493ab21a976fcdfa3e4e 2024-12-02T21:28:32,029 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/table/fd179346f84c493ab21a976fcdfa3e4e, entries=2, sequenceid=11, filesize=5.3 K 2024-12-02T21:28:32,030 INFO 
[RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false 2024-12-02T21:28:32,036 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-02T21:28:32,037 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:28:32,037 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:28:32,037 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733174911899Running coprocessor pre-close hooks at 1733174911899Disabling compacts and flushes for region at 1733174911899Disabling writes for close at 1733174911899Obtaining lock to block concurrent updates at 1733174911899Preparing flush snapshotting stores in 1588230740 at 1733174911899Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733174911900 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733174911900Flushing 1588230740/info: creating writer at 1733174911901 (+1 ms)Flushing 1588230740/info: appending metadata at 1733174911920 (+19 ms)Flushing 1588230740/info: closing flushed file at 1733174911920Flushing 1588230740/ns: creating writer at 1733174911936 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733174911953 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733174911953Flushing 1588230740/table: creating writer at 1733174911970 (+17 ms)Flushing 1588230740/table: appending metadata at 1733174911988 (+18 ms)Flushing 1588230740/table: closing flushed file at 1733174911988Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77209026: reopening flushed file at 1733174912002 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cdf113c: reopening flushed file at 1733174912010 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d3ec27b: reopening flushed file at 1733174912019 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false at 1733174912030 (+11 ms)Writing region close event to WAL at 1733174912032 (+2 ms)Running coprocessor post-close hooks at 1733174912037 (+5 ms)Closed at 1733174912037 2024-12-02T21:28:32,037 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T21:28:32,099 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(976): stopping server 87c3fdb6c570,38553,1733174816511; all regions closed. 
2024-12-02T21:28:32,101 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,101 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,101 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,102 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,102 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741834_1010 (size=3066) 2024-12-02T21:28:32,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741834_1010 (size=3066) 2024-12-02T21:28:32,108 DEBUG [RS:0;87c3fdb6c570:38553 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/oldWALs 2024-12-02T21:28:32,109 INFO [RS:0;87c3fdb6c570:38553 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C38553%2C1733174816511.meta:.meta(num 1733174818501) 2024-12-02T21:28:32,109 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,109 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,109 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,110 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,110 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741847_1023 (size=12695) 2024-12-02T21:28:32,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741847_1023 (size=12695) 2024-12-02T21:28:32,116 DEBUG [RS:0;87c3fdb6c570:38553 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/oldWALs 2024-12-02T21:28:32,116 INFO [RS:0;87c3fdb6c570:38553 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C38553%2C1733174816511:(num 1733174891583) 2024-12-02T21:28:32,116 DEBUG [RS:0;87c3fdb6c570:38553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:28:32,116 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:28:32,116 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:28:32,116 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.ChoreService(370): Chore service for: regionserver/87c3fdb6c570:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T21:28:32,117 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:28:32,117 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T21:28:32,117 INFO [RS:0;87c3fdb6c570:38553 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38553 2024-12-02T21:28:32,306 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87c3fdb6c570,38553,1733174816511 2024-12-02T21:28:32,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:28:32,306 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:28:32,316 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87c3fdb6c570,38553,1733174816511] 2024-12-02T21:28:32,326 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87c3fdb6c570,38553,1733174816511 already deleted, retry=false 2024-12-02T21:28:32,326 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87c3fdb6c570,38553,1733174816511 expired; onlineServers=0 2024-12-02T21:28:32,326 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87c3fdb6c570,35249,1733174815752' ***** 2024-12-02T21:28:32,326 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T21:28:32,327 INFO [M:0;87c3fdb6c570:35249 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:28:32,327 INFO [M:0;87c3fdb6c570:35249 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:28:32,327 DEBUG [M:0;87c3fdb6c570:35249 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T21:28:32,327 DEBUG [M:0;87c3fdb6c570:35249 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T21:28:32,327 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T21:28:32,327 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174817633 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174817633,5,FailOnTimeoutGroup] 2024-12-02T21:28:32,327 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174817632 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174817632,5,FailOnTimeoutGroup] 2024-12-02T21:28:32,327 INFO [M:0;87c3fdb6c570:35249 {}] hbase.ChoreService(370): Chore service for: master/87c3fdb6c570:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T21:28:32,327 INFO [M:0;87c3fdb6c570:35249 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:28:32,327 DEBUG [M:0;87c3fdb6c570:35249 {}] master.HMaster(1795): Stopping service threads 2024-12-02T21:28:32,328 INFO [M:0;87c3fdb6c570:35249 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T21:28:32,328 INFO [M:0;87c3fdb6c570:35249 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:28:32,328 INFO [M:0;87c3fdb6c570:35249 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T21:28:32,328 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T21:28:32,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T21:28:32,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:32,337 DEBUG [M:0;87c3fdb6c570:35249 {}] zookeeper.ZKUtil(347): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T21:28:32,337 WARN [M:0;87c3fdb6c570:35249 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T21:28:32,338 INFO [M:0;87c3fdb6c570:35249 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/.lastflushedseqids 2024-12-02T21:28:32,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741854_1030 (size=130) 2024-12-02T21:28:32,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741854_1030 (size=130) 2024-12-02T21:28:32,353 INFO [M:0;87c3fdb6c570:35249 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T21:28:32,353 INFO [M:0;87c3fdb6c570:35249 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T21:28:32,353 DEBUG [M:0;87c3fdb6c570:35249 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:28:32,353 INFO [M:0;87c3fdb6c570:35249 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:32,353 DEBUG [M:0;87c3fdb6c570:35249 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:32,353 DEBUG [M:0;87c3fdb6c570:35249 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:28:32,353 DEBUG [M:0;87c3fdb6c570:35249 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:32,353 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-12-02T21:28:32,370 DEBUG [M:0;87c3fdb6c570:35249 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3d8a6b5f2b414823b29814d91e1647e7 is 82, key is hbase:meta,,1/info:regioninfo/1733174818568/Put/seqid=0 2024-12-02T21:28:32,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741855_1031 (size=5672) 2024-12-02T21:28:32,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741855_1031 (size=5672) 2024-12-02T21:28:32,417 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:28:32,417 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38553-0x10197f0e4850001, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:28:32,417 INFO [RS:0;87c3fdb6c570:38553 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:28:32,417 INFO [RS:0;87c3fdb6c570:38553 {}] regionserver.HRegionServer(1031): Exiting; stopping=87c3fdb6c570,38553,1733174816511; zookeeper connection closed. 
2024-12-02T21:28:32,418 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@463a04c3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@463a04c3 2024-12-02T21:28:32,418 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T21:28:32,778 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3d8a6b5f2b414823b29814d91e1647e7 2024-12-02T21:28:32,801 DEBUG [M:0;87c3fdb6c570:35249 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/897a87fe76374529a69aee67508b9223 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733174819504/Put/seqid=0 2024-12-02T21:28:32,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741856_1032 (size=6247) 2024-12-02T21:28:32,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741856_1032 (size=6247) 2024-12-02T21:28:32,808 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/897a87fe76374529a69aee67508b9223 2024-12-02T21:28:32,815 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 897a87fe76374529a69aee67508b9223 2024-12-02T21:28:32,831 DEBUG [M:0;87c3fdb6c570:35249 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/989429fac706414a866922efda47fed8 is 69, key is 87c3fdb6c570,38553,1733174816511/rs:state/1733174817667/Put/seqid=0 2024-12-02T21:28:32,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741857_1033 (size=5156) 2024-12-02T21:28:32,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741857_1033 (size=5156) 2024-12-02T21:28:32,838 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/989429fac706414a866922efda47fed8 2024-12-02T21:28:32,858 DEBUG [M:0;87c3fdb6c570:35249 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1088b985165941f8b80618e128a93c87 is 52, key is load_balancer_on/state:d/1733174818991/Put/seqid=0 2024-12-02T21:28:32,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37999 is added to blk_1073741858_1034 (size=5056) 2024-12-02T21:28:32,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741858_1034 (size=5056) 2024-12-02T21:28:32,865 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1088b985165941f8b80618e128a93c87 2024-12-02T21:28:32,874 DEBUG [M:0;87c3fdb6c570:35249 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3d8a6b5f2b414823b29814d91e1647e7 as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3d8a6b5f2b414823b29814d91e1647e7 2024-12-02T21:28:32,883 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3d8a6b5f2b414823b29814d91e1647e7, entries=8, sequenceid=59, filesize=5.5 K 2024-12-02T21:28:32,885 DEBUG [M:0;87c3fdb6c570:35249 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/897a87fe76374529a69aee67508b9223 as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/897a87fe76374529a69aee67508b9223 2024-12-02T21:28:32,895 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 897a87fe76374529a69aee67508b9223 2024-12-02T21:28:32,896 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/897a87fe76374529a69aee67508b9223, entries=6, sequenceid=59, filesize=6.1 K 2024-12-02T21:28:32,897 DEBUG [M:0;87c3fdb6c570:35249 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/989429fac706414a866922efda47fed8 as hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/989429fac706414a866922efda47fed8 2024-12-02T21:28:32,905 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/989429fac706414a866922efda47fed8, entries=1, sequenceid=59, filesize=5.0 K 2024-12-02T21:28:32,907 DEBUG [M:0;87c3fdb6c570:35249 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1088b985165941f8b80618e128a93c87 as 
hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1088b985165941f8b80618e128a93c87 2024-12-02T21:28:32,914 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1088b985165941f8b80618e128a93c87, entries=1, sequenceid=59, filesize=4.9 K 2024-12-02T21:28:32,916 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 562ms, sequenceid=59, compaction requested=false 2024-12-02T21:28:32,917 INFO [M:0;87c3fdb6c570:35249 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:32,917 DEBUG [M:0;87c3fdb6c570:35249 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733174912353Disabling compacts and flushes for region at 1733174912353Disabling writes for close at 1733174912353Obtaining lock to block concurrent updates at 1733174912353Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733174912353Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1733174912354 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733174912355 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733174912355Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733174912370 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733174912370Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733174912787 (+417 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733174912801 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733174912801Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733174912815 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733174912830 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733174912830Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733174912843 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733174912857 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733174912857Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@391d3188: reopening flushed file at 1733174912872 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d333bbc: reopening flushed file at 1733174912883 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3198380a: reopening flushed file at 1733174912896 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@223fb324: reopening flushed file at 1733174912905 (+9 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 562ms, sequenceid=59, compaction requested=false at 1733174912916 (+11 ms)Writing region close event to WAL at 1733174912917 (+1 ms)Closed at 1733174912917 2024-12-02T21:28:32,918 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,918 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,918 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,919 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,919 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:32,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37999 is added to blk_1073741830_1006 (size=27973) 2024-12-02T21:28:32,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37419 is added to blk_1073741830_1006 (size=27973) 2024-12-02T21:28:32,922 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T21:28:32,922 INFO [M:0;87c3fdb6c570:35249 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T21:28:32,922 INFO [M:0;87c3fdb6c570:35249 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35249 2024-12-02T21:28:32,922 INFO [M:0;87c3fdb6c570:35249 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:28:33,118 INFO [M:0;87c3fdb6c570:35249 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:28:33,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:28:33,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35249-0x10197f0e4850000, quorum=127.0.0.1:58312, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:28:33,124 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:33,127 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:28:33,127 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:28:33,128 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:28:33,128 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/hadoop.log.dir/,STOPPED} 2024-12-02T21:28:33,131 WARN [BP-619167935-172.17.0.3-1733174811821 heartbeating to localhost/127.0.0.1:43567 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:28:33,131 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:28:33,132 WARN [BP-619167935-172.17.0.3-1733174811821 heartbeating to localhost/127.0.0.1:43567 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-619167935-172.17.0.3-1733174811821 (Datanode Uuid d4213cd8-145b-4ac8-8e63-9e550960a701) service to localhost/127.0.0.1:43567 2024-12-02T21:28:33,132 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:28:33,133 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc/data/data3/current/BP-619167935-172.17.0.3-1733174811821 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:33,133 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc/data/data4/current/BP-619167935-172.17.0.3-1733174811821 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:33,134 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:28:33,141 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:33,142 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:28:33,142 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:28:33,142 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:28:33,142 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/hadoop.log.dir/,STOPPED} 2024-12-02T21:28:33,145 WARN [BP-619167935-172.17.0.3-1733174811821 heartbeating to localhost/127.0.0.1:43567 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:28:33,145 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:28:33,145 WARN [BP-619167935-172.17.0.3-1733174811821 heartbeating to localhost/127.0.0.1:43567 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-619167935-172.17.0.3-1733174811821 (Datanode Uuid 9f6b0d9f-051f-4740-ad58-013776d1c904) service to localhost/127.0.0.1:43567 2024-12-02T21:28:33,145 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:28:33,145 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc/data/data1/current/BP-619167935-172.17.0.3-1733174811821 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:33,146 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/cluster_3068bd51-b8aa-5c20-5fc9-6ee3178e21fc/data/data2/current/BP-619167935-172.17.0.3-1733174811821 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:33,146 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:28:33,161 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:28:33,162 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:28:33,162 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:28:33,162 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:28:33,162 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/hadoop.log.dir/,STOPPED} 2024-12-02T21:28:33,175 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:28:33,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T21:28:33,231 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=82 (was 12) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43567 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43567 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: 
IPC Client (75657370) connection to localhost/127.0.0.1:43567 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/87c3fdb6c570:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43567 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43567 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@1884553c java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: master/87c3fdb6c570:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/87c3fdb6c570:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43567 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:43567 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43567 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/87c3fdb6c570:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=259 (was 279), ProcessCount=11 (was 11), AvailableMemoryMB=2898 (was 3668)
2024-12-02T21:28:33,245 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=83, OpenFileDescriptor=405, MaxFileDescriptor=1048576, SystemLoadAverage=259, ProcessCount=11, AvailableMemoryMB=2898
2024-12-02T21:28:33,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-02T21:28:33,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/hadoop.log.dir so I do NOT create it in target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b
2024-12-02T21:28:33,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4c84e7b4-d150-0212-6061-ec15c63d1e34/hadoop.tmp.dir so I do NOT create it in target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b
2024-12-02T21:28:33,246 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495, deleteOnExit=true
2024-12-02T21:28:33,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-02T21:28:33,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/test.cache.data in system properties and HBase conf
2024-12-02T21:28:33,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/hadoop.tmp.dir in system properties and HBase conf
2024-12-02T21:28:33,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/hadoop.log.dir in system properties and HBase conf
2024-12-02T21:28:33,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-02T21:28:33,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-02T21:28:33,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-02T21:28:33,248 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-02T21:28:33,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-02T21:28:33,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-02T21:28:33,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-02T21:28:33,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-02T21:28:33,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-02T21:28:33,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-02T21:28:33,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-02T21:28:33,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-02T21:28:33,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-02T21:28:33,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/nfs.dump.dir in system properties and HBase conf
2024-12-02T21:28:33,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/java.io.tmpdir in system properties and HBase conf
2024-12-02T21:28:33,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-02T21:28:33,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-02T21:28:33,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-02T21:28:33,280 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-02T21:28:33,687 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-02T21:28:33,693 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-02T21:28:33,702 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-02T21:28:33,702 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-02T21:28:33,702 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-02T21:28:33,703 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-02T21:28:33,704 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f841e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/hadoop.log.dir/,AVAILABLE}
2024-12-02T21:28:33,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30a1c2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-02T21:28:33,760 INFO [regionserver/87c3fdb6c570:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-02T21:28:33,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d95bc23{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/java.io.tmpdir/jetty-localhost-45865-hadoop-hdfs-3_4_1-tests_jar-_-any-3809496677938548559/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-02T21:28:33,797 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78fa6004{HTTP/1.1, (http/1.1)}{localhost:45865}
2024-12-02T21:28:33,797 INFO [Time-limited test {}] server.Server(415): Started @103789ms
2024-12-02T21:28:33,808 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-02T21:28:34,206 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-02T21:28:34,210 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-02T21:28:34,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-02T21:28:34,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-02T21:28:34,211 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-02T21:28:34,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e23c0c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/hadoop.log.dir/,AVAILABLE}
2024-12-02T21:28:34,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7517d9e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-02T21:28:34,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d69c419{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/java.io.tmpdir/jetty-localhost-45645-hadoop-hdfs-3_4_1-tests_jar-_-any-15327407510953293500/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-02T21:28:34,303 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f2378c9{HTTP/1.1, (http/1.1)}{localhost:45645}
2024-12-02T21:28:34,303 INFO [Time-limited test {}] server.Server(415): Started @104295ms
2024-12-02T21:28:34,304 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-02T21:28:34,334 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:28:34,337 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:28:34,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:28:34,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:28:34,338 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:28:34,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ce533a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:28:34,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@aab268d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:28:34,429 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75434f63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/java.io.tmpdir/jetty-localhost-40097-hadoop-hdfs-3_4_1-tests_jar-_-any-7373442532388864534/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:34,429 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a2066f8{HTTP/1.1, (http/1.1)}{localhost:40097} 2024-12-02T21:28:34,429 INFO [Time-limited test {}] server.Server(415): Started @104421ms 2024-12-02T21:28:34,430 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
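The block above records HBaseTestingUtil pointing every YARN/HDFS/NFS directory property at the per-test data directory and then bringing up the embedded Jetty servers for the NameNode and two DataNode web UIs, i.e. the start of a mini HDFS cluster. As rough orientation only, a test produces this kind of output with something like the following sketch (the HBaseTestingUtil method names are assumed from the usual branch-3 test utility API, not taken from this log):

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Redirects the yarn.*, dfs.*, nfs.* and java.io.tmpdir settings under the
        // utility's test-data directory (the "Setting ... in system properties and
        // HBase conf" lines above) and starts a NameNode plus the requested DataNodes.
        util.startMiniDFSCluster(2);
        try {
          // ... exercise the cluster via util.getDFSCluster() / util.getConfiguration() ...
        } finally {
          util.shutdownMiniDFSCluster();
        }
      }
    }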
2024-12-02T21:28:36,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:28:36,072 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T21:28:36,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T21:28:36,077 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-02T21:28:36,107 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495/data/data1/current/BP-1163702065-172.17.0.3-1733174913304/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:36,107 WARN [Thread-454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495/data/data2/current/BP-1163702065-172.17.0.3-1733174913304/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:36,127 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:28:36,129 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9bc3cff51c0746a6 with lease ID 0x4f23ce06084f85c4: Processing first storage report for DS-a402d302-adce-4414-930d-0521472bafb4 from datanode DatanodeRegistration(127.0.0.1:45347, datanodeUuid=7096160f-1873-4ba3-b560-b5373a90e86c, infoPort=43857, infoSecurePort=0, ipcPort=45715, storageInfo=lv=-57;cid=testClusterID;nsid=471123223;c=1733174913304) 2024-12-02T21:28:36,129 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9bc3cff51c0746a6 with lease ID 0x4f23ce06084f85c4: from storage DS-a402d302-adce-4414-930d-0521472bafb4 node DatanodeRegistration(127.0.0.1:45347, datanodeUuid=7096160f-1873-4ba3-b560-b5373a90e86c, infoPort=43857, infoSecurePort=0, ipcPort=45715, storageInfo=lv=-57;cid=testClusterID;nsid=471123223;c=1733174913304), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:28:36,130 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9bc3cff51c0746a6 with lease ID 0x4f23ce06084f85c4: Processing first storage report for DS-0cc9548e-0fa9-49a8-83ee-c796d4ee7bfb from datanode DatanodeRegistration(127.0.0.1:45347, datanodeUuid=7096160f-1873-4ba3-b560-b5373a90e86c, infoPort=43857, infoSecurePort=0, ipcPort=45715, storageInfo=lv=-57;cid=testClusterID;nsid=471123223;c=1733174913304) 2024-12-02T21:28:36,130 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9bc3cff51c0746a6 with lease ID 0x4f23ce06084f85c4: from storage DS-0cc9548e-0fa9-49a8-83ee-c796d4ee7bfb node DatanodeRegistration(127.0.0.1:45347, datanodeUuid=7096160f-1873-4ba3-b560-b5373a90e86c, infoPort=43857, infoSecurePort=0, ipcPort=45715, storageInfo=lv=-57;cid=testClusterID;nsid=471123223;c=1733174913304), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:28:36,282 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495/data/data4/current/BP-1163702065-172.17.0.3-1733174913304/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:36,282 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495/data/data3/current/BP-1163702065-172.17.0.3-1733174913304/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:36,300 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:28:36,302 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x54fe3a78ea90f09c with lease ID 0x4f23ce06084f85c5: Processing first storage report for DS-4be13c37-69bb-484e-840d-a6bfb82a7bb5 from datanode DatanodeRegistration(127.0.0.1:35773, datanodeUuid=c946e817-1ee6-411c-801d-d8d2599e3531, infoPort=35643, infoSecurePort=0, ipcPort=42635, storageInfo=lv=-57;cid=testClusterID;nsid=471123223;c=1733174913304) 2024-12-02T21:28:36,302 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x54fe3a78ea90f09c with lease ID 0x4f23ce06084f85c5: from storage DS-4be13c37-69bb-484e-840d-a6bfb82a7bb5 node DatanodeRegistration(127.0.0.1:35773, datanodeUuid=c946e817-1ee6-411c-801d-d8d2599e3531, infoPort=35643, infoSecurePort=0, ipcPort=42635, storageInfo=lv=-57;cid=testClusterID;nsid=471123223;c=1733174913304), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T21:28:36,302 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x54fe3a78ea90f09c with lease ID 0x4f23ce06084f85c5: Processing first storage report for DS-1ece7e5d-e813-4889-b312-b753f989b7b5 from datanode DatanodeRegistration(127.0.0.1:35773, datanodeUuid=c946e817-1ee6-411c-801d-d8d2599e3531, infoPort=35643, infoSecurePort=0, ipcPort=42635, storageInfo=lv=-57;cid=testClusterID;nsid=471123223;c=1733174913304) 2024-12-02T21:28:36,302 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x54fe3a78ea90f09c with lease ID 0x4f23ce06084f85c5: from storage DS-1ece7e5d-e813-4889-b312-b753f989b7b5 node DatanodeRegistration(127.0.0.1:35773, datanodeUuid=c946e817-1ee6-411c-801d-d8d2599e3531, infoPort=35643, infoSecurePort=0, ipcPort=42635, storageInfo=lv=-57;cid=testClusterID;nsid=471123223;c=1733174913304), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:28:36,374 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b 2024-12-02T21:28:36,377 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495/zookeeper_0, clientPort=49401, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:28:36,378 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49401 2024-12-02T21:28:36,379 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:36,381 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:36,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:28:36,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:28:36,392 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc with version=8 2024-12-02T21:28:36,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/hbase-staging 2024-12-02T21:28:36,394 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:28:36,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:36,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:36,394 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:28:36,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:36,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:28:36,394 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T21:28:36,394 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:28:36,395 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42837 2024-12-02T21:28:36,396 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42837 connecting to ZooKeeper ensemble=127.0.0.1:49401 2024-12-02T21:28:36,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:428370x0, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:28:36,546 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42837-0x10197f270c40000 connected 2024-12-02T21:28:36,633 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:36,635 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:36,639 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:28:36,640 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc, hbase.cluster.distributed=false 2024-12-02T21:28:36,642 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:28:36,642 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42837 2024-12-02T21:28:36,642 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42837 2024-12-02T21:28:36,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42837 2024-12-02T21:28:36,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42837 2024-12-02T21:28:36,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42837 2024-12-02T21:28:36,658 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:28:36,658 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:36,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:36,659 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:28:36,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:36,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:28:36,659 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:28:36,659 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:28:36,660 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44625 2024-12-02T21:28:36,661 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44625 connecting to ZooKeeper ensemble=127.0.0.1:49401 2024-12-02T21:28:36,662 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:36,664 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:36,675 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446250x0, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:28:36,675 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:28:36,675 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44625-0x10197f270c40001 connected 2024-12-02T21:28:36,676 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:28:36,677 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:28:36,678 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:28:36,680 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:28:36,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44625 2024-12-02T21:28:36,683 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44625 2024-12-02T21:28:36,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44625 2024-12-02T21:28:36,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44625 2024-12-02T21:28:36,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44625 2024-12-02T21:28:36,700 DEBUG [M:0;87c3fdb6c570:42837 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87c3fdb6c570:42837 2024-12-02T21:28:36,700 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87c3fdb6c570,42837,1733174916393 2024-12-02T21:28:36,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:28:36,706 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:28:36,707 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87c3fdb6c570,42837,1733174916393 2024-12-02T21:28:36,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:36,717 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:28:36,717 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:36,718 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:28:36,718 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87c3fdb6c570,42837,1733174916393 from backup master directory 2024-12-02T21:28:36,809 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:28:36,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87c3fdb6c570,42837,1733174916393 2024-12-02T21:28:36,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:28:36,809 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
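By this point the log shows a MiniZooKeeperCluster listening on client port 49401, the master RPC server bound to port 42837, a region server bound to 44625, and the master registering itself under /hbase/backup-masters before deleting that znode as it becomes active. The configuration being applied is visible in the log itself; a minimal sketch of the equivalent client-side settings (values copied from the lines above, key names are standard HBase configuration keys):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MiniClusterConfSketch {
      static Configuration testConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.rootdir",
            "hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc");
        conf.setBoolean("hbase.cluster.distributed", false);   // single-process mini cluster
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 49401);
        return conf;
      }
    }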
2024-12-02T21:28:36,809 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87c3fdb6c570,42837,1733174916393 2024-12-02T21:28:36,820 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/hbase.id] with ID: c3f6943d-927f-490e-a5a5-93606d33ca7e 2024-12-02T21:28:36,820 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/.tmp/hbase.id 2024-12-02T21:28:36,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:28:36,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:28:36,828 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/.tmp/hbase.id]:[hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/hbase.id] 2024-12-02T21:28:36,844 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:36,844 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T21:28:36,846 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
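The FSUtils lines above describe how the new active master publishes the cluster ID: write hbase.id under a .tmp directory first, then move it to its final location so readers never observe a partially written file. A hedged sketch of that write-then-rename pattern with the Hadoop FileSystem API (illustrative helper names, not HBase's own code; HBase additionally wraps the ID in its own file format):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdPublishSketch {
      static void publishClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
        Path tmp = new Path(rootDir, ".tmp/hbase.id");   // temporary location
        Path dst = new Path(rootDir, "hbase.id");        // final, visible location
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // The "Move the temporary cluster ID file to its target location" step in the log.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("could not move " + tmp + " to " + dst);
        }
      }
    }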
2024-12-02T21:28:36,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:36,875 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:36,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:28:36,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:28:36,883 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:28:36,884 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T21:28:36,885 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:28:36,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:28:36,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:28:36,894 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store 2024-12-02T21:28:36,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:28:36,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:28:36,903 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:28:36,903 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:28:36,903 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:36,903 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:36,903 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:28:36,903 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:36,903 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
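The descriptor that MasterRegion logs above is a small local table named master:store with four column families: a versioned, in-memory info family with 8 KB blocks and ROW_INDEX_V1 encoding, plus plain proc, rs and state families. A sketch of how a descriptor with that shape is built with the public client builder API (an illustration only, not the master's actual code path, and it omits some of the logged attributes such as the bloom filter type):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8KB
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
      }
    }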
2024-12-02T21:28:36,903 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733174916903Disabling compacts and flushes for region at 1733174916903Disabling writes for close at 1733174916903Writing region close event to WAL at 1733174916903Closed at 1733174916903 2024-12-02T21:28:36,905 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/.initializing 2024-12-02T21:28:36,905 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/WALs/87c3fdb6c570,42837,1733174916393 2024-12-02T21:28:36,908 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C42837%2C1733174916393, suffix=, logDir=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/WALs/87c3fdb6c570,42837,1733174916393, archiveDir=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/oldWALs, maxLogs=10 2024-12-02T21:28:36,908 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42837%2C1733174916393.1733174916908 2024-12-02T21:28:36,914 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/WALs/87c3fdb6c570,42837,1733174916393/87c3fdb6c570%2C42837%2C1733174916393.1733174916908 2024-12-02T21:28:36,915 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43857:43857),(127.0.0.1/127.0.0.1:35643:35643)] 2024-12-02T21:28:36,916 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:28:36,916 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:28:36,916 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,916 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,922 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:28:36,923 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:36,923 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:36,923 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,925 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:28:36,925 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:36,926 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:28:36,926 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,929 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:28:36,929 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:36,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:28:36,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,932 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:28:36,932 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:36,933 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:28:36,933 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,934 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,934 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,936 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,936 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,937 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:28:36,938 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:36,940 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:28:36,941 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741446, jitterRate=-0.05720348656177521}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:28:36,942 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733174916916Initializing all the Stores at 1733174916918 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174916918Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174916920 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174916920Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174916920Cleaning up temporary data from old regions at 1733174916936 (+16 ms)Region opened successfully at 1733174916942 (+6 ms) 2024-12-02T21:28:36,942 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:28:36,946 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f726e18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:28:36,947 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T21:28:36,948 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:28:36,948 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:28:36,948 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:28:36,949 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T21:28:36,949 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T21:28:36,949 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:28:36,952 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T21:28:36,953 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:28:36,959 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:28:36,959 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:28:36,960 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:28:36,969 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:28:36,970 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:28:36,971 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:28:36,980 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:28:36,981 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:28:36,983 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:36,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:36,990 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:28:36,993 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:28:37,001 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:28:37,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:28:37,011 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:28:37,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:37,011 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:37,012 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87c3fdb6c570,42837,1733174916393, sessionid=0x10197f270c40000, setting cluster-up flag (Was=false) 2024-12-02T21:28:37,033 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:37,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:37,065 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:28:37,069 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,42837,1733174916393 2024-12-02T21:28:37,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:37,093 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:37,127 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:28:37,131 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,42837,1733174916393 2024-12-02T21:28:37,133 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T21:28:37,138 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T21:28:37,139 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T21:28:37,139 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-02T21:28:37,139 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87c3fdb6c570,42837,1733174916393 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:28:37,141 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:28:37,141 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:28:37,142 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:28:37,142 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:28:37,142 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87c3fdb6c570:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:28:37,142 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,142 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:28:37,142 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,143 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733174947143 2024-12-02T21:28:37,143 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:28:37,143 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:28:37,143 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:28:37,143 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:28:37,143 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:28:37,143 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:28:37,143 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,144 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:28:37,144 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:28:37,144 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:28:37,144 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:28:37,144 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:28:37,144 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:28:37,144 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:28:37,144 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174917144,5,FailOnTimeoutGroup] 2024-12-02T21:28:37,145 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174917145,5,FailOnTimeoutGroup] 2024-12-02T21:28:37,145 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,145 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:28:37,145 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,145 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-02T21:28:37,145 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:37,145 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:28:37,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:28:37,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:28:37,154 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T21:28:37,154 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc 2024-12-02T21:28:37,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:28:37,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:28:37,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:28:37,166 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:28:37,169 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:28:37,169 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:37,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:37,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:28:37,171 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:28:37,171 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:37,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:37,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:28:37,175 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:28:37,175 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:37,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:37,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:28:37,177 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:28:37,178 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:37,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:37,178 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:28:37,179 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/1588230740 2024-12-02T21:28:37,180 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/1588230740 2024-12-02T21:28:37,182 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:28:37,182 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:28:37,182 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:28:37,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:28:37,186 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:28:37,187 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711789, jitterRate=-0.09491436183452606}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:28:37,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733174917165Initializing all the Stores at 1733174917166 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174917166Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174917166Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174917166Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174917166Cleaning up temporary data from old regions at 1733174917182 (+16 ms)Region opened successfully at 1733174917188 (+6 ms) 2024-12-02T21:28:37,189 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:28:37,189 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:28:37,189 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:28:37,189 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:28:37,189 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:28:37,189 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:28:37,189 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733174917188Disabling compacts and flushes for region at 1733174917188Disabling writes for close at 1733174917189 (+1 ms)Writing region close event to WAL at 1733174917189Closed at 1733174917189 2024-12-02T21:28:37,191 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(746): ClusterId : c3f6943d-927f-490e-a5a5-93606d33ca7e 2024-12-02T21:28:37,191 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:28:37,191 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:28:37,191 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T21:28:37,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:28:37,193 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:28:37,195 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:28:37,202 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:28:37,202 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:28:37,213 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:28:37,213 DEBUG [RS:0;87c3fdb6c570:44625 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a4e1a13, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:28:37,224 DEBUG [RS:0;87c3fdb6c570:44625 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87c3fdb6c570:44625 2024-12-02T21:28:37,225 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T21:28:37,225 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T21:28:37,225 DEBUG [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T21:28:37,226 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(2659): reportForDuty to master=87c3fdb6c570,42837,1733174916393 with port=44625, startcode=1733174916658 2024-12-02T21:28:37,226 DEBUG [RS:0;87c3fdb6c570:44625 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:28:37,229 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59159, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:28:37,229 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42837 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87c3fdb6c570,44625,1733174916658 2024-12-02T21:28:37,229 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42837 {}] master.ServerManager(517): Registering regionserver=87c3fdb6c570,44625,1733174916658 2024-12-02T21:28:37,232 DEBUG [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc 2024-12-02T21:28:37,232 DEBUG [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44183 2024-12-02T21:28:37,232 DEBUG [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T21:28:37,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:28:37,244 DEBUG [RS:0;87c3fdb6c570:44625 {}] zookeeper.ZKUtil(111): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87c3fdb6c570,44625,1733174916658 2024-12-02T21:28:37,244 WARN [RS:0;87c3fdb6c570:44625 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T21:28:37,244 INFO [RS:0;87c3fdb6c570:44625 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:28:37,244 DEBUG [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/WALs/87c3fdb6c570,44625,1733174916658 2024-12-02T21:28:37,244 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87c3fdb6c570,44625,1733174916658] 2024-12-02T21:28:37,249 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:28:37,250 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:28:37,251 INFO [RS:0;87c3fdb6c570:44625 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:28:37,251 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,251 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T21:28:37,252 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T21:28:37,252 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,252 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,252 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,252 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,252 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,252 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,252 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:28:37,253 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,253 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,253 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87c3fdb6c570:0, corePoolSize=1, 
maxPoolSize=1 2024-12-02T21:28:37,253 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,253 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,253 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:37,253 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:28:37,253 DEBUG [RS:0;87c3fdb6c570:44625 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:28:37,253 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,253 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,253 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,254 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,254 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,254 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,44625,1733174916658-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:28:37,267 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:28:37,267 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,44625,1733174916658-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,267 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,267 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.Replication(171): 87c3fdb6c570,44625,1733174916658 started 2024-12-02T21:28:37,279 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:28:37,279 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(1482): Serving as 87c3fdb6c570,44625,1733174916658, RpcServer on 87c3fdb6c570/172.17.0.3:44625, sessionid=0x10197f270c40001 2024-12-02T21:28:37,279 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:28:37,279 DEBUG [RS:0;87c3fdb6c570:44625 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87c3fdb6c570,44625,1733174916658 2024-12-02T21:28:37,280 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,44625,1733174916658' 2024-12-02T21:28:37,280 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:28:37,280 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:28:37,281 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:28:37,281 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:28:37,281 DEBUG [RS:0;87c3fdb6c570:44625 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87c3fdb6c570,44625,1733174916658 2024-12-02T21:28:37,281 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,44625,1733174916658' 2024-12-02T21:28:37,281 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:28:37,282 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:28:37,282 DEBUG [RS:0;87c3fdb6c570:44625 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:28:37,282 INFO [RS:0;87c3fdb6c570:44625 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:28:37,282 INFO [RS:0;87c3fdb6c570:44625 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T21:28:37,345 WARN [87c3fdb6c570:42837 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-02T21:28:37,388 INFO [RS:0;87c3fdb6c570:44625 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C44625%2C1733174916658, suffix=, logDir=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/WALs/87c3fdb6c570,44625,1733174916658, archiveDir=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/oldWALs, maxLogs=32 2024-12-02T21:28:37,393 INFO [RS:0;87c3fdb6c570:44625 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C44625%2C1733174916658.1733174917393 2024-12-02T21:28:37,401 INFO [RS:0;87c3fdb6c570:44625 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/WALs/87c3fdb6c570,44625,1733174916658/87c3fdb6c570%2C44625%2C1733174916658.1733174917393 2024-12-02T21:28:37,403 DEBUG [RS:0;87c3fdb6c570:44625 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43857:43857),(127.0.0.1/127.0.0.1:35643:35643)] 2024-12-02T21:28:37,501 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:28:37,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:37,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:37,596 DEBUG [87c3fdb6c570:42837 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:28:37,597 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87c3fdb6c570,44625,1733174916658 2024-12-02T21:28:37,599 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,44625,1733174916658, state=OPENING 2024-12-02T21:28:37,654 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:28:37,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:37,664 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:37,665 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:28:37,665 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:28:37,665 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:28:37,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; 
OpenRegionProcedure 1588230740, server=87c3fdb6c570,44625,1733174916658}] 2024-12-02T21:28:37,819 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:28:37,821 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50395, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:28:37,826 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T21:28:37,826 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:28:37,829 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C44625%2C1733174916658.meta, suffix=.meta, logDir=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/WALs/87c3fdb6c570,44625,1733174916658, archiveDir=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/oldWALs, maxLogs=32 2024-12-02T21:28:37,831 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C44625%2C1733174916658.meta.1733174917831.meta 2024-12-02T21:28:37,837 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/WALs/87c3fdb6c570,44625,1733174916658/87c3fdb6c570%2C44625%2C1733174916658.meta.1733174917831.meta 2024-12-02T21:28:37,838 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35643:35643),(127.0.0.1/127.0.0.1:43857:43857)] 2024-12-02T21:28:37,839 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:28:37,839 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:28:37,839 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:28:37,839 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-02T21:28:37,839 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:28:37,839 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:28:37,839 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T21:28:37,839 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T21:28:37,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:28:37,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:28:37,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:37,842 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:37,842 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:28:37,843 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:28:37,843 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:37,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:37,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:28:37,845 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:28:37,845 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:37,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:37,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:28:37,846 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:28:37,847 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:37,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-02T21:28:37,847 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:28:37,848 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/1588230740 2024-12-02T21:28:37,850 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/1588230740 2024-12-02T21:28:37,851 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:28:37,851 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:28:37,852 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:28:37,853 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:28:37,856 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860598, jitterRate=0.09430715441703796}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:28:37,857 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T21:28:37,857 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733174917839Writing region info on filesystem at 1733174917839Initializing all the Stores at 1733174917840 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174917840Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174917841 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174917841Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174917841Cleaning up temporary data from old regions at 1733174917851 (+10 ms)Running coprocessor post-open hooks at 1733174917857 (+6 ms)Region opened successfully at 1733174917857 2024-12-02T21:28:37,859 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733174917819 2024-12-02T21:28:37,862 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:28:37,862 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T21:28:37,863 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87c3fdb6c570,44625,1733174916658 2024-12-02T21:28:37,864 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,44625,1733174916658, state=OPEN 2024-12-02T21:28:37,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:28:37,906 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:28:37,906 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87c3fdb6c570,44625,1733174916658 2024-12-02T21:28:37,906 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:28:37,906 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:28:37,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:28:37,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,44625,1733174916658 in 241 msec 2024-12-02T21:28:37,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:28:37,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 719 msec 2024-12-02T21:28:37,916 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:28:37,916 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T21:28:37,918 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:28:37,918 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,44625,1733174916658, seqNum=-1] 2024-12-02T21:28:37,918 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:28:37,920 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50309, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:28:37,928 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 792 msec 2024-12-02T21:28:37,929 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733174917929, completionTime=-1 2024-12-02T21:28:37,929 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:28:37,929 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T21:28:37,931 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T21:28:37,932 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733174977932 2024-12-02T21:28:37,932 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733175037932 2024-12-02T21:28:37,932 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-02T21:28:37,932 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,42837,1733174916393-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,932 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,42837,1733174916393-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,933 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,42837,1733174916393-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,933 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87c3fdb6c570:42837, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:28:37,933 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,933 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:37,936 DEBUG [master/87c3fdb6c570:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T21:28:37,938 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.129sec 2024-12-02T21:28:37,938 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:28:37,938 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:28:37,938 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:28:37,939 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T21:28:37,939 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:28:37,939 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,42837,1733174916393-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:28:37,939 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,42837,1733174916393-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:28:37,941 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:28:37,941 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:28:37,941 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,42837,1733174916393-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:28:37,991 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3aadf114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:28:37,991 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87c3fdb6c570,42837,-1 for getting cluster id 2024-12-02T21:28:37,991 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T21:28:37,993 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c3f6943d-927f-490e-a5a5-93606d33ca7e' 2024-12-02T21:28:37,994 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T21:28:37,994 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c3f6943d-927f-490e-a5a5-93606d33ca7e" 2024-12-02T21:28:37,994 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22b8ea1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:28:37,994 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87c3fdb6c570,42837,-1] 2024-12-02T21:28:37,994 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T21:28:37,995 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:28:37,997 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46994, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T21:28:37,998 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@510cc70d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:28:37,998 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:28:38,000 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,44625,1733174916658, seqNum=-1] 2024-12-02T21:28:38,000 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:28:38,002 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48366, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:28:38,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87c3fdb6c570,42837,1733174916393 2024-12-02T21:28:38,005 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:38,008 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T21:28:38,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T21:28:38,008 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T21:28:38,008 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:28:38,009 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:28:38,009 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:28:38,009 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T21:28:38,009 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:28:38,009 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=904512183, stopped=false 2024-12-02T21:28:38,009 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87c3fdb6c570,42837,1733174916393 2024-12-02T21:28:38,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:28:38,030 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:28:38,030 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:38,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:38,030 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:28:38,030 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T21:28:38,031 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:28:38,031 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:28:38,031 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:28:38,031 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:28:38,031 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87c3fdb6c570,44625,1733174916658' ***** 2024-12-02T21:28:38,031 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T21:28:38,031 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:28:38,031 INFO [RS:0;87c3fdb6c570:44625 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:28:38,031 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T21:28:38,031 INFO [RS:0;87c3fdb6c570:44625 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:28:38,032 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(959): stopping server 87c3fdb6c570,44625,1733174916658 2024-12-02T21:28:38,032 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:28:38,032 INFO [RS:0;87c3fdb6c570:44625 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87c3fdb6c570:44625. 2024-12-02T21:28:38,032 DEBUG [RS:0;87c3fdb6c570:44625 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:28:38,032 DEBUG [RS:0;87c3fdb6c570:44625 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:28:38,032 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-02T21:28:38,032 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:28:38,032 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:28:38,032 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T21:28:38,032 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T21:28:38,032 DEBUG [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-02T21:28:38,033 DEBUG [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-02T21:28:38,033 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:28:38,033 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:28:38,033 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:28:38,033 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:28:38,033 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:28:38,033 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-02T21:28:38,057 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/1588230740/.tmp/ns/fd71206765754b23a59231d1c0923155 is 43, key is default/ns:d/1733174917921/Put/seqid=0 2024-12-02T21:28:38,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741835_1011 (size=5153) 2024-12-02T21:28:38,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741835_1011 (size=5153) 2024-12-02T21:28:38,063 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/1588230740/.tmp/ns/fd71206765754b23a59231d1c0923155 2024-12-02T21:28:38,073 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/1588230740/.tmp/ns/fd71206765754b23a59231d1c0923155 as hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/1588230740/ns/fd71206765754b23a59231d1c0923155 2024-12-02T21:28:38,082 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/1588230740/ns/fd71206765754b23a59231d1c0923155, entries=2, sequenceid=6, filesize=5.0 K 2024-12-02T21:28:38,083 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 50ms, sequenceid=6, compaction requested=false 2024-12-02T21:28:38,083 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T21:28:38,089 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T21:28:38,089 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:28:38,089 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:28:38,089 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733174918032Running coprocessor pre-close hooks at 1733174918032Disabling compacts and flushes for region at 1733174918032Disabling writes for close at 1733174918033 (+1 ms)Obtaining lock to block concurrent updates at 1733174918033Preparing flush snapshotting stores in 1588230740 at 1733174918033Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733174918033Flushing stores of hbase:meta,,1.1588230740 at 1733174918034 (+1 ms)Flushing 1588230740/ns: creating writer at 1733174918034Flushing 1588230740/ns: appending metadata at 1733174918056 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1733174918056Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2772afe5: reopening flushed file at 1733174918072 (+16 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 50ms, sequenceid=6, compaction requested=false at 1733174918083 (+11 ms)Writing region close event to WAL at 1733174918084 (+1 ms)Running coprocessor post-close hooks at 1733174918089 (+5 ms)Closed at 1733174918089 2024-12-02T21:28:38,090 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T21:28:38,233 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(976): stopping server 87c3fdb6c570,44625,1733174916658; all regions closed. 
2024-12-02T21:28:38,234 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,234 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,234 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,234 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,234 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741834_1010 (size=1152) 2024-12-02T21:28:38,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741834_1010 (size=1152) 2024-12-02T21:28:38,242 DEBUG [RS:0;87c3fdb6c570:44625 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/oldWALs 2024-12-02T21:28:38,242 INFO [RS:0;87c3fdb6c570:44625 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C44625%2C1733174916658.meta:.meta(num 1733174917831) 2024-12-02T21:28:38,243 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,243 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,243 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,244 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,244 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741833_1009 (size=93) 2024-12-02T21:28:38,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741833_1009 (size=93) 2024-12-02T21:28:38,251 DEBUG [RS:0;87c3fdb6c570:44625 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/oldWALs 2024-12-02T21:28:38,251 INFO [RS:0;87c3fdb6c570:44625 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C44625%2C1733174916658:(num 1733174917393) 2024-12-02T21:28:38,251 DEBUG [RS:0;87c3fdb6c570:44625 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:28:38,251 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:28:38,251 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:28:38,252 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.ChoreService(370): Chore service for: regionserver/87c3fdb6c570:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T21:28:38,252 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:28:38,252 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T21:28:38,252 INFO [RS:0;87c3fdb6c570:44625 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44625 2024-12-02T21:28:38,264 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87c3fdb6c570,44625,1733174916658 2024-12-02T21:28:38,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:28:38,264 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:28:38,265 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87c3fdb6c570,44625,1733174916658] 2024-12-02T21:28:38,285 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87c3fdb6c570,44625,1733174916658 already deleted, retry=false 2024-12-02T21:28:38,285 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87c3fdb6c570,44625,1733174916658 expired; onlineServers=0 2024-12-02T21:28:38,285 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87c3fdb6c570,42837,1733174916393' ***** 2024-12-02T21:28:38,286 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T21:28:38,286 INFO [M:0;87c3fdb6c570:42837 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:28:38,286 INFO [M:0;87c3fdb6c570:42837 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:28:38,286 DEBUG [M:0;87c3fdb6c570:42837 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T21:28:38,286 DEBUG [M:0;87c3fdb6c570:42837 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T21:28:38,286 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T21:28:38,286 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174917144 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174917144,5,FailOnTimeoutGroup] 2024-12-02T21:28:38,286 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174917145 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174917145,5,FailOnTimeoutGroup] 2024-12-02T21:28:38,287 INFO [M:0;87c3fdb6c570:42837 {}] hbase.ChoreService(370): Chore service for: master/87c3fdb6c570:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T21:28:38,287 INFO [M:0;87c3fdb6c570:42837 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:28:38,288 DEBUG [M:0;87c3fdb6c570:42837 {}] master.HMaster(1795): Stopping service threads 2024-12-02T21:28:38,288 INFO [M:0;87c3fdb6c570:42837 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T21:28:38,288 INFO [M:0;87c3fdb6c570:42837 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:28:38,289 INFO [M:0;87c3fdb6c570:42837 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T21:28:38,289 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T21:28:38,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T21:28:38,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:38,296 DEBUG [M:0;87c3fdb6c570:42837 {}] zookeeper.ZKUtil(347): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T21:28:38,296 WARN [M:0;87c3fdb6c570:42837 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T21:28:38,297 INFO [M:0;87c3fdb6c570:42837 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/.lastflushedseqids 2024-12-02T21:28:38,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741836_1012 (size=99) 2024-12-02T21:28:38,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741836_1012 (size=99) 2024-12-02T21:28:38,307 INFO [M:0;87c3fdb6c570:42837 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T21:28:38,307 INFO [M:0;87c3fdb6c570:42837 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T21:28:38,307 DEBUG [M:0;87c3fdb6c570:42837 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:28:38,307 INFO [M:0;87c3fdb6c570:42837 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:38,307 DEBUG [M:0;87c3fdb6c570:42837 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:38,307 DEBUG [M:0;87c3fdb6c570:42837 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:28:38,307 DEBUG [M:0;87c3fdb6c570:42837 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:38,308 INFO [M:0;87c3fdb6c570:42837 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-02T21:28:38,328 DEBUG [M:0;87c3fdb6c570:42837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fe5671d221204013957a3bf9be8fe1f7 is 82, key is hbase:meta,,1/info:regioninfo/1733174917863/Put/seqid=0 2024-12-02T21:28:38,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741837_1013 (size=5672) 2024-12-02T21:28:38,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741837_1013 (size=5672) 2024-12-02T21:28:38,335 INFO [M:0;87c3fdb6c570:42837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fe5671d221204013957a3bf9be8fe1f7 2024-12-02T21:28:38,356 DEBUG [M:0;87c3fdb6c570:42837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fa6d626e8f334c59a42caac773a5fcda is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733174917927/Put/seqid=0 2024-12-02T21:28:38,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741838_1014 (size=5275) 2024-12-02T21:28:38,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741838_1014 (size=5275) 2024-12-02T21:28:38,362 INFO [M:0;87c3fdb6c570:42837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fa6d626e8f334c59a42caac773a5fcda 2024-12-02T21:28:38,375 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:28:38,375 INFO [RS:0;87c3fdb6c570:44625 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:28:38,375 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44625-0x10197f270c40001, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-12-02T21:28:38,375 INFO [RS:0;87c3fdb6c570:44625 {}] regionserver.HRegionServer(1031): Exiting; stopping=87c3fdb6c570,44625,1733174916658; zookeeper connection closed. 2024-12-02T21:28:38,375 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@47760ea6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@47760ea6 2024-12-02T21:28:38,375 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T21:28:38,385 DEBUG [M:0;87c3fdb6c570:42837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/394b34ab446b4894aa9ccb6e362f52f3 is 69, key is 87c3fdb6c570,44625,1733174916658/rs:state/1733174917230/Put/seqid=0 2024-12-02T21:28:38,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741839_1015 (size=5156) 2024-12-02T21:28:38,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741839_1015 (size=5156) 2024-12-02T21:28:38,392 INFO [M:0;87c3fdb6c570:42837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/394b34ab446b4894aa9ccb6e362f52f3 2024-12-02T21:28:38,415 DEBUG [M:0;87c3fdb6c570:42837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6b52ece6a1824fae9c2f4e4b6c59c262 is 52, key is load_balancer_on/state:d/1733174918007/Put/seqid=0 2024-12-02T21:28:38,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741840_1016 (size=5056) 2024-12-02T21:28:38,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741840_1016 (size=5056) 2024-12-02T21:28:38,423 INFO [M:0;87c3fdb6c570:42837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6b52ece6a1824fae9c2f4e4b6c59c262 2024-12-02T21:28:38,430 DEBUG [M:0;87c3fdb6c570:42837 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fe5671d221204013957a3bf9be8fe1f7 as hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fe5671d221204013957a3bf9be8fe1f7 2024-12-02T21:28:38,437 INFO [M:0;87c3fdb6c570:42837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fe5671d221204013957a3bf9be8fe1f7, entries=8, sequenceid=29, filesize=5.5 K 2024-12-02T21:28:38,439 DEBUG 
[M:0;87c3fdb6c570:42837 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fa6d626e8f334c59a42caac773a5fcda as hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fa6d626e8f334c59a42caac773a5fcda 2024-12-02T21:28:38,445 INFO [M:0;87c3fdb6c570:42837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fa6d626e8f334c59a42caac773a5fcda, entries=3, sequenceid=29, filesize=5.2 K 2024-12-02T21:28:38,447 DEBUG [M:0;87c3fdb6c570:42837 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/394b34ab446b4894aa9ccb6e362f52f3 as hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/394b34ab446b4894aa9ccb6e362f52f3 2024-12-02T21:28:38,453 INFO [M:0;87c3fdb6c570:42837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/394b34ab446b4894aa9ccb6e362f52f3, entries=1, sequenceid=29, filesize=5.0 K 2024-12-02T21:28:38,455 DEBUG [M:0;87c3fdb6c570:42837 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6b52ece6a1824fae9c2f4e4b6c59c262 as hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6b52ece6a1824fae9c2f4e4b6c59c262 2024-12-02T21:28:38,461 INFO [M:0;87c3fdb6c570:42837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44183/user/jenkins/test-data/960112fc-cfc1-3428-882e-590a68b0efcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6b52ece6a1824fae9c2f4e4b6c59c262, entries=1, sequenceid=29, filesize=4.9 K 2024-12-02T21:28:38,462 INFO [M:0;87c3fdb6c570:42837 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=29, compaction requested=false 2024-12-02T21:28:38,463 INFO [M:0;87c3fdb6c570:42837 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:38,464 DEBUG [M:0;87c3fdb6c570:42837 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733174918307Disabling compacts and flushes for region at 1733174918307Disabling writes for close at 1733174918307Obtaining lock to block concurrent updates at 1733174918308 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733174918308Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733174918310 (+2 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733174918311 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733174918311Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733174918328 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733174918328Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733174918341 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733174918356 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733174918356Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733174918369 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733174918385 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733174918385Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733174918399 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733174918415 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733174918415Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b35e5bd: reopening flushed file at 1733174918429 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a200ef7: reopening flushed file at 1733174918438 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e075b71: reopening flushed file at 1733174918445 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15af4fd5: reopening flushed file at 1733174918453 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=29, compaction requested=false at 1733174918462 (+9 ms)Writing region close event to WAL at 1733174918463 (+1 ms)Closed at 1733174918463 2024-12-02T21:28:38,464 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,464 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,464 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,464 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,464 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:38,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35773 is added to blk_1073741830_1006 (size=10311) 2024-12-02T21:28:38,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45347 is added to blk_1073741830_1006 (size=10311) 2024-12-02T21:28:38,467 INFO [M:0;87c3fdb6c570:42837 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T21:28:38,467 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T21:28:38,467 INFO [M:0;87c3fdb6c570:42837 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42837 2024-12-02T21:28:38,467 INFO [M:0;87c3fdb6c570:42837 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:28:38,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:28:38,575 INFO [M:0;87c3fdb6c570:42837 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:28:38,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42837-0x10197f270c40000, quorum=127.0.0.1:49401, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:28:38,604 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75434f63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:38,604 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a2066f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:28:38,604 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:28:38,604 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@aab268d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:28:38,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ce533a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/hadoop.log.dir/,STOPPED} 2024-12-02T21:28:38,606 WARN [BP-1163702065-172.17.0.3-1733174913304 heartbeating to localhost/127.0.0.1:44183 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:28:38,606 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:28:38,606 WARN [BP-1163702065-172.17.0.3-1733174913304 heartbeating to localhost/127.0.0.1:44183 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1163702065-172.17.0.3-1733174913304 (Datanode Uuid c946e817-1ee6-411c-801d-d8d2599e3531) service to localhost/127.0.0.1:44183 2024-12-02T21:28:38,606 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:28:38,607 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495/data/data3/current/BP-1163702065-172.17.0.3-1733174913304 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:38,608 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495/data/data4/current/BP-1163702065-172.17.0.3-1733174913304 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:38,608 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:28:38,611 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d69c419{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:38,611 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f2378c9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:28:38,611 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:28:38,612 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7517d9e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:28:38,612 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e23c0c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/hadoop.log.dir/,STOPPED} 2024-12-02T21:28:38,613 WARN [BP-1163702065-172.17.0.3-1733174913304 heartbeating to localhost/127.0.0.1:44183 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:28:38,613 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:28:38,613 WARN [BP-1163702065-172.17.0.3-1733174913304 heartbeating to localhost/127.0.0.1:44183 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1163702065-172.17.0.3-1733174913304 (Datanode Uuid 7096160f-1873-4ba3-b560-b5373a90e86c) service to localhost/127.0.0.1:44183 2024-12-02T21:28:38,613 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:28:38,614 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495/data/data1/current/BP-1163702065-172.17.0.3-1733174913304 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:38,614 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/cluster_38944116-0276-7484-489b-d6182f0e9495/data/data2/current/BP-1163702065-172.17.0.3-1733174913304 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:38,615 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:28:38,621 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d95bc23{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:28:38,621 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78fa6004{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:28:38,622 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:28:38,622 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30a1c2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:28:38,622 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f841e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/hadoop.log.dir/,STOPPED} 2024-12-02T21:28:38,627 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:28:38,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T21:28:38,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:28:38,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/hadoop.log.dir so I do NOT create it in target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33 2024-12-02T21:28:38,644 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7d334b6-fe0c-ff4b-05b1-01055187810b/hadoop.tmp.dir so I do NOT create it in target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33 2024-12-02T21:28:38,644 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670, deleteOnExit=true 2024-12-02T21:28:38,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T21:28:38,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/test.cache.data in system properties and HBase conf 2024-12-02T21:28:38,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:28:38,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:28:38,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:28:38,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:28:38,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T21:28:38,645 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T21:28:38,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:28:38,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:28:38,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:28:38,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:28:38,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:28:38,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:28:38,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:28:38,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:28:38,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:28:38,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:28:38,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:28:38,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:28:38,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:28:38,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:28:38,658 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:28:38,995 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:28:38,999 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:28:39,000 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:28:39,001 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:28:39,001 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:28:39,003 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:28:39,003 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3150e6db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:28:39,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d790455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:28:39,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7982676d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/java.io.tmpdir/jetty-localhost-36343-hadoop-hdfs-3_4_1-tests_jar-_-any-10580742395372181114/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:28:39,100 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2efbdc75{HTTP/1.1, (http/1.1)}{localhost:36343} 2024-12-02T21:28:39,100 INFO [Time-limited test {}] server.Server(415): Started @109092ms 2024-12-02T21:28:39,112 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:28:39,254 INFO [regionserver/87c3fdb6c570:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:28:39,383 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:28:39,387 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:28:39,388 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:28:39,388 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:28:39,388 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:28:39,390 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bf32f74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:28:39,390 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bb5d847{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:28:39,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5538b075{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/java.io.tmpdir/jetty-localhost-37529-hadoop-hdfs-3_4_1-tests_jar-_-any-4989823278470413909/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:39,482 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f87a993{HTTP/1.1, (http/1.1)}{localhost:37529} 2024-12-02T21:28:39,482 INFO [Time-limited test {}] server.Server(415): Started @109474ms 2024-12-02T21:28:39,483 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:28:39,508 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:28:39,511 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:28:39,511 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:28:39,512 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:28:39,512 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:28:39,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32403ac6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:28:39,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bb19ef9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:28:39,602 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@272348fe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/java.io.tmpdir/jetty-localhost-35719-hadoop-hdfs-3_4_1-tests_jar-_-any-3233987519013130139/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:39,602 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10b53169{HTTP/1.1, (http/1.1)}{localhost:35719} 2024-12-02T21:28:39,602 INFO [Time-limited test {}] server.Server(415): Started @109595ms 2024-12-02T21:28:39,604 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:28:40,661 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data1/current/BP-510899863-172.17.0.3-1733174918669/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:40,661 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data2/current/BP-510899863-172.17.0.3-1733174918669/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:40,688 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:28:40,691 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x93658589e7d42088 with lease ID 0x7c59dd479eb1acdf: Processing first storage report for DS-7558115b-79be-4716-9d89-27c17c543e11 from datanode DatanodeRegistration(127.0.0.1:40921, datanodeUuid=77dd7d53-78d4-48ee-bed7-6e78c79c324a, infoPort=42689, infoSecurePort=0, ipcPort=39023, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669) 2024-12-02T21:28:40,691 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x93658589e7d42088 with lease ID 0x7c59dd479eb1acdf: from storage DS-7558115b-79be-4716-9d89-27c17c543e11 node DatanodeRegistration(127.0.0.1:40921, datanodeUuid=77dd7d53-78d4-48ee-bed7-6e78c79c324a, infoPort=42689, infoSecurePort=0, ipcPort=39023, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T21:28:40,691 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x93658589e7d42088 with lease ID 0x7c59dd479eb1acdf: Processing first storage report for DS-7a01861a-8521-4b00-a266-f58cf09fd4b3 from datanode DatanodeRegistration(127.0.0.1:40921, datanodeUuid=77dd7d53-78d4-48ee-bed7-6e78c79c324a, infoPort=42689, infoSecurePort=0, ipcPort=39023, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669) 2024-12-02T21:28:40,691 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x93658589e7d42088 with lease ID 0x7c59dd479eb1acdf: from storage DS-7a01861a-8521-4b00-a266-f58cf09fd4b3 node DatanodeRegistration(127.0.0.1:40921, datanodeUuid=77dd7d53-78d4-48ee-bed7-6e78c79c324a, infoPort=42689, infoSecurePort=0, ipcPort=39023, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:28:40,791 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data3/current/BP-510899863-172.17.0.3-1733174918669/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:40,791 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data4/current/BP-510899863-172.17.0.3-1733174918669/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:40,813 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:28:40,815 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f7561826feafb2f with lease ID 0x7c59dd479eb1ace0: Processing first storage report for DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955 from datanode DatanodeRegistration(127.0.0.1:37751, datanodeUuid=e7491dd4-eed2-463b-a465-b069e6a1f484, infoPort=39749, infoSecurePort=0, ipcPort=37613, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669) 2024-12-02T21:28:40,815 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f7561826feafb2f with lease ID 0x7c59dd479eb1ace0: from storage DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955 node DatanodeRegistration(127.0.0.1:37751, datanodeUuid=e7491dd4-eed2-463b-a465-b069e6a1f484, infoPort=39749, infoSecurePort=0, ipcPort=37613, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:28:40,816 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f7561826feafb2f with lease ID 0x7c59dd479eb1ace0: Processing first storage report for DS-65ec6835-25dd-4cc8-8a09-1d7a610e306e from datanode DatanodeRegistration(127.0.0.1:37751, datanodeUuid=e7491dd4-eed2-463b-a465-b069e6a1f484, infoPort=39749, infoSecurePort=0, ipcPort=37613, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669) 2024-12-02T21:28:40,816 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f7561826feafb2f with lease ID 0x7c59dd479eb1ace0: from storage DS-65ec6835-25dd-4cc8-8a09-1d7a610e306e node DatanodeRegistration(127.0.0.1:37751, datanodeUuid=e7491dd4-eed2-463b-a465-b069e6a1f484, infoPort=39749, infoSecurePort=0, ipcPort=37613, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:28:40,839 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33 2024-12-02T21:28:40,842 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/zookeeper_0, clientPort=59541, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:28:40,844 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59541 2024-12-02T21:28:40,844 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:40,846 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:40,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40921 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:28:40,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37751 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:28:40,860 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db with version=8 2024-12-02T21:28:40,860 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/hbase-staging 2024-12-02T21:28:40,862 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:28:40,862 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:40,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:40,863 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:28:40,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:40,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:28:40,863 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T21:28:40,863 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:28:40,864 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35361 2024-12-02T21:28:40,865 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35361 connecting to ZooKeeper ensemble=127.0.0.1:59541 2024-12-02T21:28:40,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:353610x0, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:28:40,923 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35361-0x10197f282340000 connected 2024-12-02T21:28:41,001 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:41,003 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:41,005 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:28:41,005 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db, hbase.cluster.distributed=false 2024-12-02T21:28:41,007 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:28:41,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35361 2024-12-02T21:28:41,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35361 2024-12-02T21:28:41,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35361 2024-12-02T21:28:41,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35361 2024-12-02T21:28:41,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35361 2024-12-02T21:28:41,025 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:28:41,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:41,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:41,025 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:28:41,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:41,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:28:41,025 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:28:41,026 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:28:41,026 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42545 2024-12-02T21:28:41,027 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42545 connecting to ZooKeeper ensemble=127.0.0.1:59541 2024-12-02T21:28:41,028 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:41,030 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:41,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425450x0, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:28:41,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:425450x0, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:28:41,044 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42545-0x10197f282340001 connected 2024-12-02T21:28:41,044 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:28:41,044 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:28:41,045 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:28:41,046 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:28:41,046 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42545 2024-12-02T21:28:41,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42545 2024-12-02T21:28:41,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42545 2024-12-02T21:28:41,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42545 2024-12-02T21:28:41,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42545 2024-12-02T21:28:41,061 DEBUG [M:0;87c3fdb6c570:35361 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87c3fdb6c570:35361 2024-12-02T21:28:41,061 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87c3fdb6c570,35361,1733174920862 2024-12-02T21:28:41,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:28:41,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:28:41,075 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/87c3fdb6c570,35361,1733174920862 2024-12-02T21:28:41,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:28:41,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:41,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:41,086 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:28:41,087 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87c3fdb6c570,35361,1733174920862 from backup master directory 2024-12-02T21:28:41,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:28:41,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87c3fdb6c570,35361,1733174920862 2024-12-02T21:28:41,096 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T21:28:41,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:28:41,096 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87c3fdb6c570,35361,1733174920862 2024-12-02T21:28:41,104 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/hbase.id] with ID: 0539d412-0cf5-4c09-adda-4404d34cc312 2024-12-02T21:28:41,104 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/.tmp/hbase.id 2024-12-02T21:28:41,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40921 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:28:41,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37751 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:28:41,113 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/.tmp/hbase.id]:[hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/hbase.id] 2024-12-02T21:28:41,126 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:41,126 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T21:28:41,128 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-02T21:28:41,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:41,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:41,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40921 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:28:41,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37751 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:28:41,144 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:28:41,145 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T21:28:41,145 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:28:41,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40921 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:28:41,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37751 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:28:41,155 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store 2024-12-02T21:28:41,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37751 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:28:41,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40921 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:28:41,164 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:28:41,164 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:28:41,165 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:41,165 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:41,165 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:28:41,165 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:28:41,165 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:28:41,165 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733174921164Disabling compacts and flushes for region at 1733174921164Disabling writes for close at 1733174921165 (+1 ms)Writing region close event to WAL at 1733174921165Closed at 1733174921165 2024-12-02T21:28:41,166 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/.initializing 2024-12-02T21:28:41,166 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862 2024-12-02T21:28:41,169 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C35361%2C1733174920862, suffix=, logDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862, archiveDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/oldWALs, maxLogs=10 2024-12-02T21:28:41,170 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C35361%2C1733174920862.1733174921169 2024-12-02T21:28:41,175 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 2024-12-02T21:28:41,176 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42689:42689),(127.0.0.1/127.0.0.1:39749:39749)] 2024-12-02T21:28:41,178 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:28:41,178 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:28:41,178 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,178 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,179 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,181 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:28:41,181 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:41,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:41,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,183 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:28:41,183 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:41,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:28:41,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:28:41,186 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:41,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:28:41,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:28:41,188 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:41,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:28:41,188 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,189 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,190 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,191 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,191 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,192 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:28:41,194 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:28:41,197 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:28:41,197 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692759, jitterRate=-0.11911144852638245}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:28:41,198 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733174921178Initializing all the Stores at 1733174921179 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174921179Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174921179Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174921179Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174921179Cleaning up temporary data from old regions at 1733174921192 (+13 ms)Region opened successfully at 1733174921198 (+6 ms) 2024-12-02T21:28:41,200 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:28:41,203 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6585b4b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:28:41,204 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T21:28:41,204 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:28:41,204 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:28:41,204 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:28:41,205 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T21:28:41,205 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T21:28:41,205 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:28:41,207 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T21:28:41,208 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:28:41,261 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:28:41,262 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:28:41,263 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:28:41,431 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:28:41,432 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:28:41,433 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:28:41,620 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:28:41,622 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:28:41,815 DEBUG 
[master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:28:41,818 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:28:42,060 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:28:42,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:28:42,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:28:42,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:42,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:42,147 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87c3fdb6c570,35361,1733174920862, sessionid=0x10197f282340000, setting cluster-up flag (Was=false) 2024-12-02T21:28:42,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:42,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:42,295 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:28:42,297 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,35361,1733174920862 2024-12-02T21:28:42,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:42,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:42,348 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:28:42,353 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,35361,1733174920862 2024-12-02T21:28:42,357 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T21:28:42,362 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T21:28:42,363 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T21:28:42,363 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T21:28:42,363 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87c3fdb6c570,35361,1733174920862 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:28:42,365 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:28:42,365 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:28:42,365 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:28:42,365 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:28:42,366 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87c3fdb6c570:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:28:42,366 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,366 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:28:42,366 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, 
maxPoolSize=1 2024-12-02T21:28:42,366 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733174952366 2024-12-02T21:28:42,367 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:28:42,367 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:28:42,367 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:28:42,367 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:28:42,367 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:28:42,367 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:28:42,367 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:42,367 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:28:42,367 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:28:42,367 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:28:42,367 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:28:42,368 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:28:42,368 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:28:42,368 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:28:42,368 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174922368,5,FailOnTimeoutGroup] 2024-12-02T21:28:42,368 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174922368,5,FailOnTimeoutGroup] 2024-12-02T21:28:42,368 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:42,369 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:28:42,369 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:42,369 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:42,369 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:42,369 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:28:42,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37751 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:28:42,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40921 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:28:42,376 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T21:28:42,376 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db 2024-12-02T21:28:42,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37751 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:28:42,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40921 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:28:42,384 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:28:42,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:28:42,387 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:28:42,387 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:42,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:42,388 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:28:42,389 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:28:42,389 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:42,390 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:42,390 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:28:42,392 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:28:42,392 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:42,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:42,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:28:42,394 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:28:42,394 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:42,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:42,395 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:28:42,396 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740 2024-12-02T21:28:42,396 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740 2024-12-02T21:28:42,397 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:28:42,397 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:28:42,398 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
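The CompactionConfiguration entries above print the per-store compaction thresholds used while opening these stores (minCompactSize 128 MB, min/max files to compact 3/10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter). For reference, a minimal Java sketch of how such values are typically supplied through the site/client Configuration follows; the key names are assumed from stock HBase defaults and are illustrative only, not read from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
    // Illustrative only: key names assumed from stock HBase defaults,
    // values copied from the CompactionConfiguration line logged above.
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period (7 days)
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
        return conf;
    }
}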
2024-12-02T21:28:42,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:28:42,401 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:28:42,401 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841110, jitterRate=0.06952762603759766}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:28:42,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733174922384Initializing all the Stores at 1733174922385 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174922385Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174922385Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174922385Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174922385Cleaning up temporary data from old regions at 1733174922397 (+12 ms)Region opened successfully at 1733174922402 (+5 ms) 2024-12-02T21:28:42,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:28:42,402 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:28:42,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:28:42,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:28:42,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:28:42,403 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:28:42,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733174922402Disabling compacts and flushes for region at 1733174922402Disabling writes for close at 1733174922402Writing region close 
event to WAL at 1733174922403 (+1 ms)Closed at 1733174922403 2024-12-02T21:28:42,404 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:28:42,404 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T21:28:42,404 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:28:42,406 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:28:42,407 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:28:42,454 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(746): ClusterId : 0539d412-0cf5-4c09-adda-4404d34cc312 2024-12-02T21:28:42,454 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:28:42,496 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:28:42,497 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:28:42,507 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:28:42,507 DEBUG [RS:0;87c3fdb6c570:42545 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f9c1512, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:28:42,522 DEBUG [RS:0;87c3fdb6c570:42545 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87c3fdb6c570:42545 2024-12-02T21:28:42,522 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T21:28:42,522 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T21:28:42,522 DEBUG [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T21:28:42,523 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(2659): reportForDuty to master=87c3fdb6c570,35361,1733174920862 with port=42545, startcode=1733174921025 2024-12-02T21:28:42,523 DEBUG [RS:0;87c3fdb6c570:42545 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:28:42,525 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38261, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:28:42,525 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35361 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87c3fdb6c570,42545,1733174921025 2024-12-02T21:28:42,526 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35361 {}] master.ServerManager(517): Registering regionserver=87c3fdb6c570,42545,1733174921025 2024-12-02T21:28:42,527 DEBUG [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db 2024-12-02T21:28:42,527 DEBUG [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43877 2024-12-02T21:28:42,527 DEBUG [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T21:28:42,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:28:42,538 DEBUG [RS:0;87c3fdb6c570:42545 {}] zookeeper.ZKUtil(111): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87c3fdb6c570,42545,1733174921025 2024-12-02T21:28:42,538 WARN [RS:0;87c3fdb6c570:42545 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T21:28:42,538 INFO [RS:0;87c3fdb6c570:42545 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:28:42,538 DEBUG [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025 2024-12-02T21:28:42,538 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87c3fdb6c570,42545,1733174921025] 2024-12-02T21:28:42,542 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:28:42,544 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:28:42,545 INFO [RS:0;87c3fdb6c570:42545 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:28:42,545 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
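The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the region server caps total memstore usage at a fraction of its heap before forcing flushes. A small sketch of the related settings follows; the key names (global memstore fraction and per-region flush size) are assumed from stock HBase defaults rather than taken from this run's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreConfigSketch {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap usable by all memstores (assumed key).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Per-region memstore flush threshold; 134217728 matches the flushSize
        // printed by MasterRegionFlusherAndCompactor earlier in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 134217728L);
        return conf;
    }
}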
2024-12-02T21:28:42,545 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T21:28:42,546 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T21:28:42,546 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:42,546 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,546 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,546 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,546 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,547 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,547 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:28:42,547 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,547 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,547 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,547 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,547 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,547 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:42,547 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:28:42,547 DEBUG [RS:0;87c3fdb6c570:42545 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:28:42,548 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-02T21:28:42,548 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:42,548 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:42,548 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:42,548 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:42,548 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,42545,1733174921025-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:28:42,557 WARN [87c3fdb6c570:35361 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T21:28:42,561 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:28:42,561 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,42545,1733174921025-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:42,561 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:42,562 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.Replication(171): 87c3fdb6c570,42545,1733174921025 started 2024-12-02T21:28:42,598 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:28:42,598 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(1482): Serving as 87c3fdb6c570,42545,1733174921025, RpcServer on 87c3fdb6c570/172.17.0.3:42545, sessionid=0x10197f282340001 2024-12-02T21:28:42,598 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:28:42,598 DEBUG [RS:0;87c3fdb6c570:42545 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87c3fdb6c570,42545,1733174921025 2024-12-02T21:28:42,599 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,42545,1733174921025' 2024-12-02T21:28:42,599 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:28:42,599 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:28:42,600 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:28:42,600 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:28:42,600 DEBUG [RS:0;87c3fdb6c570:42545 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87c3fdb6c570,42545,1733174921025 2024-12-02T21:28:42,600 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,42545,1733174921025' 2024-12-02T21:28:42,600 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:28:42,600 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:28:42,601 DEBUG [RS:0;87c3fdb6c570:42545 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:28:42,601 INFO [RS:0;87c3fdb6c570:42545 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:28:42,601 INFO [RS:0;87c3fdb6c570:42545 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
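The last two entries above show the RPC and space quota managers being skipped because quota support is off. If quotas were wanted in a setup like this, they would be switched on before the cluster starts; the sketch below assumes the standard hbase.quota.enabled switch and is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaConfigSketch {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed standard switch; with this set, the quota managers would start
        // instead of logging "Quota support disabled".
        conf.setBoolean("hbase.quota.enabled", true);
        return conf;
    }
}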
2024-12-02T21:28:42,704 INFO [RS:0;87c3fdb6c570:42545 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C42545%2C1733174921025, suffix=, logDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025, archiveDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs, maxLogs=32 2024-12-02T21:28:42,705 INFO [RS:0;87c3fdb6c570:42545 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42545%2C1733174921025.1733174922705 2024-12-02T21:28:42,714 INFO [RS:0;87c3fdb6c570:42545 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 2024-12-02T21:28:42,715 DEBUG [RS:0;87c3fdb6c570:42545 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39749:39749),(127.0.0.1/127.0.0.1:42689:42689)] 2024-12-02T21:28:42,807 DEBUG [87c3fdb6c570:35361 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:28:42,809 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87c3fdb6c570,42545,1733174921025 2024-12-02T21:28:42,812 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,42545,1733174921025, state=OPENING 2024-12-02T21:28:42,822 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:28:42,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:42,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:28:42,835 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:28:42,835 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:28:42,835 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:28:42,835 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,42545,1733174921025}] 2024-12-02T21:28:42,990 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:28:42,993 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40389, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:28:42,999 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T21:28:42,999 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:28:43,002 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C42545%2C1733174921025.meta, suffix=.meta, logDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025, archiveDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs, maxLogs=32 2024-12-02T21:28:43,003 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta 2024-12-02T21:28:43,010 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta 2024-12-02T21:28:43,011 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42689:42689),(127.0.0.1/127.0.0.1:39749:39749)] 2024-12-02T21:28:43,012 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:28:43,012 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:28:43,012 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:28:43,012 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
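The WAL lines above show the region server instantiating FSHLogProvider and creating WALs with blocksize=256 MB, rollsize=128 MB, maxLogs=32 (plus a separate .meta-suffixed WAL for hbase:meta). A minimal sketch of the corresponding settings follows; the key names ("hbase.wal.provider", "hbase.regionserver.hlog.blocksize", "hbase.regionserver.maxlogs") and the "filesystem" provider value are assumed from stock HBase and shown only to connect the logged values to configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                            // assumed mapping to FSHLogProvider
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);   // WAL block size (assumed key)
        conf.setInt("hbase.regionserver.maxlogs", 32);                           // matches maxLogs=32 above (assumed key)
        return conf;
    }
}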
2024-12-02T21:28:43,013 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:28:43,013 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:28:43,013 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T21:28:43,013 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T21:28:43,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:28:43,015 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:28:43,015 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:43,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:43,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:28:43,017 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:28:43,017 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:43,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:43,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:28:43,018 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:28:43,018 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:43,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:28:43,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:28:43,019 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:28:43,019 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:43,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
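The store-opener entries above enumerate the hbase:meta column families (info, ns, rep_barrier, table) with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching, and an 8 KB block size, plus the MultiRowMutationEndpoint coprocessor loaded from the table descriptor. The master builds these descriptors internally via FSTableDescriptors; purely for reference, a client-side sketch of a descriptor with the same family attributes could look like the following (the table name and class name are illustrative, not part of this run):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
    public static TableDescriptor build() throws IOException {
        // An 'info'-style family mirroring the attributes printed above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
        return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example"))   // illustrative table, not hbase:meta itself
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .setColumnFamily(info)
                .build();
    }
}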
2024-12-02T21:28:43,020 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:28:43,021 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740 2024-12-02T21:28:43,022 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740 2024-12-02T21:28:43,023 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:28:43,023 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:28:43,024 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:28:43,026 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:28:43,027 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861394, jitterRate=0.09531927108764648}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:28:43,027 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T21:28:43,027 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733174923013Writing region info on filesystem at 1733174923013Initializing all the Stores at 1733174923014 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174923014Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174923014Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174923014Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174923014Cleaning up temporary data from old regions at 1733174923023 (+9 ms)Running coprocessor post-open hooks at 1733174923027 (+4 ms)Region opened successfully at 1733174923027 2024-12-02T21:28:43,029 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733174922989 2024-12-02T21:28:43,031 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:28:43,032 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T21:28:43,032 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87c3fdb6c570,42545,1733174921025 2024-12-02T21:28:43,034 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,42545,1733174921025, state=OPEN 2024-12-02T21:28:43,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:28:43,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:28:43,071 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87c3fdb6c570,42545,1733174921025 2024-12-02T21:28:43,071 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:28:43,071 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:28:43,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:28:43,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,42545,1733174921025 in 236 msec 2024-12-02T21:28:43,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:28:43,081 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 672 msec 2024-12-02T21:28:43,082 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:28:43,082 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T21:28:43,084 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:28:43,084 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,42545,1733174921025, seqNum=-1] 2024-12-02T21:28:43,085 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:28:43,086 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60613, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:28:43,094 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 733 msec 2024-12-02T21:28:43,094 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733174923094, completionTime=-1 2024-12-02T21:28:43,094 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:28:43,094 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T21:28:43,097 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T21:28:43,097 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733174983097 2024-12-02T21:28:43,097 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733175043097 2024-12-02T21:28:43,097 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-02T21:28:43,097 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35361,1733174920862-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,097 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35361,1733174920862-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,097 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35361,1733174920862-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,097 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87c3fdb6c570:35361, period=300000, unit=MILLISECONDS is enabled. 
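The region open journal above spells out the descriptor attributes of hbase:meta's 'info' family (ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, 3 versions). A hedged sketch of building an equivalent family descriptor with the public client API; the builder calls are standard ColumnFamilyDescriptorBuilder methods, only the values are taken from the log:

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaInfoFamilySketch {
  // Illustrative only: rebuilds a family descriptor with the attributes the
  // region open journal printed for hbase:meta's 'info' family.
  public static ColumnFamilyDescriptor infoFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
        .build();
  }
}
```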
2024-12-02T21:28:43,097 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,098 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,099 DEBUG [master/87c3fdb6c570:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T21:28:43,101 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.005sec 2024-12-02T21:28:43,102 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:28:43,102 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:28:43,102 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:28:43,102 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T21:28:43,102 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:28:43,102 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35361,1733174920862-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:28:43,102 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35361,1733174920862-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:28:43,104 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:28:43,104 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:28:43,104 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35361,1733174920862-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:28:43,155 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21e18edd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:28:43,155 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87c3fdb6c570,35361,-1 for getting cluster id 2024-12-02T21:28:43,155 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T21:28:43,158 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0539d412-0cf5-4c09-adda-4404d34cc312' 2024-12-02T21:28:43,159 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T21:28:43,160 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0539d412-0cf5-4c09-adda-4404d34cc312" 2024-12-02T21:28:43,160 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ff0542e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:28:43,160 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87c3fdb6c570,35361,-1] 2024-12-02T21:28:43,161 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T21:28:43,162 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:28:43,164 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57038, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T21:28:43,166 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@344f4078, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:28:43,166 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:28:43,168 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,42545,1733174921025, seqNum=-1] 2024-12-02T21:28:43,169 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:28:43,171 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44914, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:28:43,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87c3fdb6c570,35361,1733174920862 2024-12-02T21:28:43,174 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:43,177 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T21:28:43,194 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:28:43,194 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:43,194 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:43,194 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:28:43,194 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:28:43,194 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:28:43,194 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:28:43,194 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:28:43,195 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33183 2024-12-02T21:28:43,196 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33183 connecting to ZooKeeper ensemble=127.0.0.1:59541 2024-12-02T21:28:43,197 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:43,198 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:28:43,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:331830x0, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:28:43,222 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33183-0x10197f282340002 connected 2024-12-02T21:28:43,222 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:33183-0x10197f282340002, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-02T21:28:43,222 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-02T21:28:43,223 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:28:43,224 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-12-02T21:28:43,225 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:33183-0x10197f282340002, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:28:43,227 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33183-0x10197f282340002, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:28:43,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33183 2024-12-02T21:28:43,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33183 2024-12-02T21:28:43,231 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33183 2024-12-02T21:28:43,231 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33183 2024-12-02T21:28:43,231 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33183 2024-12-02T21:28:43,233 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer(746): ClusterId : 0539d412-0cf5-4c09-adda-4404d34cc312 2024-12-02T21:28:43,233 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:28:43,244 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:28:43,244 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:28:43,254 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:28:43,255 DEBUG [RS:1;87c3fdb6c570:33183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bba5bd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:28:43,268 DEBUG [RS:1;87c3fdb6c570:33183 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;87c3fdb6c570:33183 2024-12-02T21:28:43,268 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T21:28:43,268 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T21:28:43,268 DEBUG [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer(832): About to register with Master. 
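The new region server (RS:1, port 33183) registers against ZooKeeper ensemble 127.0.0.1:59541 under baseZNode /hbase and picks up cluster id 0539d412-0cf5-4c09-adda-4404d34cc312. For orientation, a minimal sketch of the client-side connection a test would typically make to that same quorum; the key names are standard HBase client settings and are an assumption here, since the log only shows the server side:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  // Hypothetical client view of the quorum reported above
  // (ensemble 127.0.0.1:59541, baseZNode /hbase).
  public static Connection connect() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 59541);
    conf.set("zookeeper.znode.parent", "/hbase");
    return ConnectionFactory.createConnection(conf);
  }
}
```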
2024-12-02T21:28:43,269 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer(2659): reportForDuty to master=87c3fdb6c570,35361,1733174920862 with port=33183, startcode=1733174923193 2024-12-02T21:28:43,269 DEBUG [RS:1;87c3fdb6c570:33183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:28:43,270 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56465, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:28:43,271 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35361 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87c3fdb6c570,33183,1733174923193 2024-12-02T21:28:43,271 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35361 {}] master.ServerManager(517): Registering regionserver=87c3fdb6c570,33183,1733174923193 2024-12-02T21:28:43,272 DEBUG [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db 2024-12-02T21:28:43,272 DEBUG [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43877 2024-12-02T21:28:43,272 DEBUG [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T21:28:43,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:28:43,285 DEBUG [RS:1;87c3fdb6c570:33183 {}] zookeeper.ZKUtil(111): regionserver:33183-0x10197f282340002, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87c3fdb6c570,33183,1733174923193 2024-12-02T21:28:43,285 WARN [RS:1;87c3fdb6c570:33183 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T21:28:43,285 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87c3fdb6c570,33183,1733174923193] 2024-12-02T21:28:43,285 INFO [RS:1;87c3fdb6c570:33183 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:28:43,286 DEBUG [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193 2024-12-02T21:28:43,289 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:28:43,291 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:28:43,292 INFO [RS:1;87c3fdb6c570:33183 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:28:43,292 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-02T21:28:43,292 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T21:28:43,293 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T21:28:43,293 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,293 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:43,293 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:28:43,294 DEBUG [RS:1;87c3fdb6c570:33183 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:28:43,295 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-02T21:28:43,295 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,295 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,295 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,295 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,295 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,33183,1733174923193-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:28:43,307 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:28:43,308 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,33183,1733174923193-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,308 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,308 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.Replication(171): 87c3fdb6c570,33183,1733174923193 started 2024-12-02T21:28:43,320 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:28:43,320 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer(1482): Serving as 87c3fdb6c570,33183,1733174923193, RpcServer on 87c3fdb6c570/172.17.0.3:33183, sessionid=0x10197f282340002 2024-12-02T21:28:43,320 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:28:43,320 DEBUG [RS:1;87c3fdb6c570:33183 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87c3fdb6c570,33183,1733174923193 2024-12-02T21:28:43,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;87c3fdb6c570:33183,5,FailOnTimeoutGroup] 2024-12-02T21:28:43,320 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,33183,1733174923193' 2024-12-02T21:28:43,320 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:28:43,320 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-02T21:28:43,321 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:28:43,321 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T21:28:43,321 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:28:43,321 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:28:43,321 DEBUG [RS:1;87c3fdb6c570:33183 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
87c3fdb6c570,33183,1733174923193 2024-12-02T21:28:43,321 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,33183,1733174923193' 2024-12-02T21:28:43,321 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:28:43,322 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:28:43,322 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 87c3fdb6c570,35361,1733174920862 2024-12-02T21:28:43,322 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6c57f553 2024-12-02T21:28:43,322 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T21:28:43,322 DEBUG [RS:1;87c3fdb6c570:33183 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:28:43,322 INFO [RS:1;87c3fdb6c570:33183 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:28:43,323 INFO [RS:1;87c3fdb6c570:33183 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T21:28:43,324 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57048, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T21:28:43,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35361 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T21:28:43,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35361 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
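The two TableDescriptorChecker warnings quote the exact settings this test lowers on purpose: hbase.hregion.max.filesize at 786432 and hbase.hregion.memstore.flush.size at 8192, small enough to force frequent splits and flushes while the WAL rolls. An illustrative reconstruction of a table descriptor that would trigger both warnings, with the values copied from the log and the remaining attributes left at their defaults (standard TableDescriptorBuilder API):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallTableDescriptorSketch {
  // Values taken from the TableDescriptorChecker warnings above.
  public static TableDescriptor smallTestTable() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setMaxFileSize(786432L)      // MAX_FILESIZE / hbase.hregion.max.filesize
        .setMemStoreFlushSize(8192L)  // MEMSTORE_FLUSHSIZE / hbase.hregion.memstore.flush.size
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build();
  }
}
```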
2024-12-02T21:28:43,325 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35361 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:28:43,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35361 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-02T21:28:43,327 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:28:43,328 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:43,328 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35361 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-02T21:28:43,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T21:28:43,329 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:28:43,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40921 is added to blk_1073741835_1011 (size=393) 2024-12-02T21:28:43,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37751 is added to blk_1073741835_1011 (size=393) 2024-12-02T21:28:43,341 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:28:43,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:43,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:43,359 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:43,360 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:43,425 INFO [RS:1;87c3fdb6c570:33183 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C33183%2C1733174923193, suffix=, logDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193, archiveDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs, maxLogs=32 2024-12-02T21:28:43,427 INFO [RS:1;87c3fdb6c570:33183 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C33183%2C1733174923193.1733174923426 2024-12-02T21:28:43,434 INFO [RS:1;87c3fdb6c570:33183 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 2024-12-02T21:28:43,435 DEBUG [RS:1;87c3fdb6c570:33183 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42689:42689),(127.0.0.1/127.0.0.1:39749:39749)] 2024-12-02T21:28:43,739 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b0c5cfcfb485c924481201897ff11224, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db 2024-12-02T21:28:43,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37751 is added to blk_1073741837_1013 (size=76) 2024-12-02T21:28:43,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40921 is added to blk_1073741837_1013 (size=76) 2024-12-02T21:28:43,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:28:43,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing b0c5cfcfb485c924481201897ff11224, disabling compactions & flushes 2024-12-02T21:28:43,755 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:28:43,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 
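The AbstractFSWAL line above reports the WAL rolling parameters for the new server: blocksize 256 MB, rollsize 128 MB, maxLogs 32. The roll size is normally the block size multiplied by the log roll multiplier (0.5 by default), which is consistent with the reported numbers. A sketch of the corresponding settings; the key names are assumed from HBase's usual WAL configuration and do not appear in this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollingConfigSketch {
  // Assumed key names; values mirror the AbstractFSWAL(613) line above
  // (blocksize=256 MB, rollsize=128 MB = 256 MB * 0.5, maxLogs=32).
  public static Configuration walTuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}
```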
2024-12-02T21:28:43,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. after waiting 0 ms 2024-12-02T21:28:43,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:28:43,755 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:28:43,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for b0c5cfcfb485c924481201897ff11224: Waiting for close lock at 1733174923755Disabling compacts and flushes for region at 1733174923755Disabling writes for close at 1733174923755Writing region close event to WAL at 1733174923755Closed at 1733174923755 2024-12-02T21:28:43,757 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:28:43,757 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733174923757"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733174923757"}]},"ts":"1733174923757"} 2024-12-02T21:28:43,760 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-02T21:28:43,761 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:28:43,762 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733174923762"}]},"ts":"1733174923762"} 2024-12-02T21:28:43,764 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-02T21:28:43,769 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {87c3fdb6c570=0} racks are {/default-rack=0} 2024-12-02T21:28:43,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T21:28:43,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T21:28:43,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T21:28:43,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T21:28:43,775 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T21:28:43,775 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T21:28:43,775 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T21:28:43,776 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b0c5cfcfb485c924481201897ff11224, ASSIGN}] 2024-12-02T21:28:43,778 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b0c5cfcfb485c924481201897ff11224, ASSIGN 2024-12-02T21:28:43,780 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b0c5cfcfb485c924481201897ff11224, ASSIGN; state=OFFLINE, location=87c3fdb6c570,42545,1733174921025; forceNewPlan=false, retain=false 2024-12-02T21:28:43,931 INFO [87c3fdb6c570:35361 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-02T21:28:43,931 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b0c5cfcfb485c924481201897ff11224, regionState=OPENING, regionLocation=87c3fdb6c570,42545,1733174921025 2024-12-02T21:28:43,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b0c5cfcfb485c924481201897ff11224, ASSIGN because future has completed 2024-12-02T21:28:43,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b0c5cfcfb485c924481201897ff11224, server=87c3fdb6c570,42545,1733174921025}] 2024-12-02T21:28:44,092 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:28:44,092 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b0c5cfcfb485c924481201897ff11224, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:28:44,093 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath b0c5cfcfb485c924481201897ff11224 2024-12-02T21:28:44,093 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:28:44,093 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b0c5cfcfb485c924481201897ff11224 2024-12-02T21:28:44,093 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b0c5cfcfb485c924481201897ff11224 2024-12-02T21:28:44,095 INFO [StoreOpener-b0c5cfcfb485c924481201897ff11224-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b0c5cfcfb485c924481201897ff11224 2024-12-02T21:28:44,097 INFO [StoreOpener-b0c5cfcfb485c924481201897ff11224-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b0c5cfcfb485c924481201897ff11224 columnFamilyName info 2024-12-02T21:28:44,098 DEBUG [StoreOpener-b0c5cfcfb485c924481201897ff11224-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:28:44,098 INFO [StoreOpener-b0c5cfcfb485c924481201897ff11224-1 {}] regionserver.HStore(327): Store=b0c5cfcfb485c924481201897ff11224/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:28:44,099 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b0c5cfcfb485c924481201897ff11224 2024-12-02T21:28:44,100 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224 2024-12-02T21:28:44,100 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224 2024-12-02T21:28:44,101 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b0c5cfcfb485c924481201897ff11224 2024-12-02T21:28:44,101 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b0c5cfcfb485c924481201897ff11224 2024-12-02T21:28:44,104 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b0c5cfcfb485c924481201897ff11224 2024-12-02T21:28:44,107 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:28:44,107 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b0c5cfcfb485c924481201897ff11224; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=793578, jitterRate=0.009086787700653076}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:28:44,107 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b0c5cfcfb485c924481201897ff11224 2024-12-02T21:28:44,108 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b0c5cfcfb485c924481201897ff11224: Running coprocessor pre-open hook at 1733174924093Writing region info on filesystem at 1733174924093Initializing all the Stores at 1733174924095 (+2 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174924095Cleaning up temporary data from old regions at 1733174924101 (+6 ms)Running coprocessor post-open hooks at 1733174924107 (+6 ms)Region opened successfully at 1733174924108 (+1 ms) 2024-12-02T21:28:44,109 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224., pid=6, masterSystemTime=1733174924087 2024-12-02T21:28:44,112 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:28:44,112 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:28:44,113 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b0c5cfcfb485c924481201897ff11224, regionState=OPEN, openSeqNum=2, regionLocation=87c3fdb6c570,42545,1733174921025 2024-12-02T21:28:44,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b0c5cfcfb485c924481201897ff11224, server=87c3fdb6c570,42545,1733174921025 because future has completed 2024-12-02T21:28:44,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T21:28:44,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b0c5cfcfb485c924481201897ff11224, server=87c3fdb6c570,42545,1733174921025 in 182 msec 2024-12-02T21:28:44,123 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T21:28:44,123 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b0c5cfcfb485c924481201897ff11224, ASSIGN in 344 msec 2024-12-02T21:28:44,124 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:28:44,124 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733174924124"}]},"ts":"1733174924124"} 2024-12-02T21:28:44,126 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-02T21:28:44,127 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:28:44,130 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 803 msec 2024-12-02T21:28:46,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T21:28:46,071 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T21:28:46,074 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-02T21:28:46,074 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-02T21:28:46,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:28:46,076 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T21:28:46,076 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T21:28:46,076 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T21:28:48,517 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:28:48,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:48,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:48,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:48,535 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:28:48,542 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-02T21:28:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T21:28:53,434 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-02T21:28:53,434 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-02T21:28:53,442 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-02T21:28:53,442 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:28:53,456 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:28:53,459 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:28:53,459 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:28:53,460 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:28:53,460 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:28:53,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30008f24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:28:53,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ab5b96c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:28:53,547 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3fd17220{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/java.io.tmpdir/jetty-localhost-46211-hadoop-hdfs-3_4_1-tests_jar-_-any-13219412705818509370/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:53,548 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c68f920{HTTP/1.1, (http/1.1)}{localhost:46211} 2024-12-02T21:28:53,548 INFO [Time-limited test {}] server.Server(415): Started @123540ms 2024-12-02T21:28:53,549 WARN [Time-limited test {}] 
web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:28:53,574 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:28:53,576 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:28:53,577 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:28:53,577 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:28:53,577 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:28:53,578 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f85c2b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:28:53,578 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64b89ed5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:28:53,668 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d82aaea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/java.io.tmpdir/jetty-localhost-33265-hadoop-hdfs-3_4_1-tests_jar-_-any-9272890858615253967/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:53,668 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3904e150{HTTP/1.1, (http/1.1)}{localhost:33265} 2024-12-02T21:28:53,668 INFO [Time-limited test {}] server.Server(415): Started @123660ms 2024-12-02T21:28:53,669 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:28:53,697 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:28:53,699 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:28:53,701 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:28:53,701 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:28:53,701 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:28:53,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b21f544{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:28:53,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47432b7b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:28:53,790 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@20faceaa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/java.io.tmpdir/jetty-localhost-34847-hadoop-hdfs-3_4_1-tests_jar-_-any-7921669495261443433/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:53,790 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e1ad43e{HTTP/1.1, (http/1.1)}{localhost:34847} 2024-12-02T21:28:53,790 INFO [Time-limited test {}] server.Server(415): Started @123782ms 2024-12-02T21:28:53,791 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:28:55,862 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5/current/BP-510899863-172.17.0.3-1733174918669/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:55,862 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6/current/BP-510899863-172.17.0.3-1733174918669/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:55,882 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:28:55,885 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd933535563981b with lease ID 0x7c59dd479eb1ace1: Processing first storage report for DS-74584e30-0164-426a-9a01-0c6fe0d1b09c from datanode DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669) 2024-12-02T21:28:55,885 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd933535563981b with lease ID 0x7c59dd479eb1ace1: from storage DS-74584e30-0164-426a-9a01-0c6fe0d1b09c node DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:28:55,885 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd933535563981b with lease ID 0x7c59dd479eb1ace1: Processing first storage report for DS-b0769b03-8c5c-422b-b84e-6ceb527f2d72 from datanode DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669) 2024-12-02T21:28:55,885 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd933535563981b with lease ID 0x7c59dd479eb1ace1: from storage DS-b0769b03-8c5c-422b-b84e-6ceb527f2d72 node DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T21:28:55,981 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data7/current/BP-510899863-172.17.0.3-1733174918669/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:55,981 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data8/current/BP-510899863-172.17.0.3-1733174918669/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:56,000 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:28:56,002 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfde6debc546e3e86 with lease ID 0x7c59dd479eb1ace2: Processing first storage report for DS-eed8541c-b823-415d-a2f9-9a441128ce5d from datanode DatanodeRegistration(127.0.0.1:34535, datanodeUuid=00d9fdea-3cc3-4e42-bda5-ec1334920e20, infoPort=42829, infoSecurePort=0, ipcPort=43055, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669) 2024-12-02T21:28:56,002 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfde6debc546e3e86 with lease ID 0x7c59dd479eb1ace2: from storage DS-eed8541c-b823-415d-a2f9-9a441128ce5d node DatanodeRegistration(127.0.0.1:34535, datanodeUuid=00d9fdea-3cc3-4e42-bda5-ec1334920e20, infoPort=42829, infoSecurePort=0, ipcPort=43055, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:28:56,002 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfde6debc546e3e86 with lease ID 0x7c59dd479eb1ace2: Processing first storage report for DS-53cc3165-2648-407a-9571-cd0dc16afa42 from datanode DatanodeRegistration(127.0.0.1:34535, datanodeUuid=00d9fdea-3cc3-4e42-bda5-ec1334920e20, infoPort=42829, infoSecurePort=0, ipcPort=43055, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669) 2024-12-02T21:28:56,002 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfde6debc546e3e86 with lease ID 0x7c59dd479eb1ace2: from storage DS-53cc3165-2648-407a-9571-cd0dc16afa42 node DatanodeRegistration(127.0.0.1:34535, datanodeUuid=00d9fdea-3cc3-4e42-bda5-ec1334920e20, infoPort=42829, infoSecurePort=0, ipcPort=43055, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:28:56,041 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data9/current/BP-510899863-172.17.0.3-1733174918669/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:56,042 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data10/current/BP-510899863-172.17.0.3-1733174918669/current, will proceed with Du for space computation calculation, 2024-12-02T21:28:56,062 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:28:56,064 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb149b13546ac56e8 with lease ID 0x7c59dd479eb1ace3: Processing first storage report for DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc from datanode DatanodeRegistration(127.0.0.1:33759, datanodeUuid=35b544fa-31f8-43d3-ba5c-02e71c9566d1, infoPort=35447, infoSecurePort=0, ipcPort=41719, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669) 2024-12-02T21:28:56,064 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb149b13546ac56e8 with lease ID 0x7c59dd479eb1ace3: from storage DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc node DatanodeRegistration(127.0.0.1:33759, datanodeUuid=35b544fa-31f8-43d3-ba5c-02e71c9566d1, infoPort=35447, infoSecurePort=0, ipcPort=41719, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:28:56,064 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb149b13546ac56e8 with lease ID 0x7c59dd479eb1ace3: Processing first storage report for DS-22b6e0a9-b230-4475-976a-27fdf59b0d55 from datanode DatanodeRegistration(127.0.0.1:33759, datanodeUuid=35b544fa-31f8-43d3-ba5c-02e71c9566d1, infoPort=35447, infoSecurePort=0, ipcPort=41719, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669) 2024-12-02T21:28:56,064 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb149b13546ac56e8 with lease ID 0x7c59dd479eb1ace3: from storage DS-22b6e0a9-b230-4475-976a-27fdf59b0d55 node DatanodeRegistration(127.0.0.1:33759, datanodeUuid=35b544fa-31f8-43d3-ba5c-02e71c9566d1, infoPort=35447, infoSecurePort=0, ipcPort=41719, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:28:56,135 WARN [ResponseProcessor for block BP-510899863-172.17.0.3-1733174918669:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-510899863-172.17.0.3-1733174918669:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:56,136 WARN [ResponseProcessor for block BP-510899863-172.17.0.3-1733174918669:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-510899863-172.17.0.3-1733174918669:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-510899863-172.17.0.3-1733174918669:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
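Annotation: the "Bad response ERROR ... from datanode" and "Error Recovery ... datanode ... is bad" warnings that follow are the HDFS write-pipeline recovery path. Once a datanode in a pipeline stops acknowledging packets, the DFSClient's DataStreamer marks it bad, rebuilds the pipeline with the remaining nodes, and, depending on configuration, asks the NameNode for a replacement. The sketch below is illustrative only and is not part of this test; it assumes hadoop-common is on the classpath and uses the standard HDFS client keys that govern that recovery behavior (the values shown are examples, not the values used by this run).

import org.apache.hadoop.conf.Configuration;

// Minimal sketch (assumption: hadoop-common / hadoop-hdfs-client on the classpath).
// These are the standard HDFS client keys consulted by DataStreamer when a
// pipeline datanode is marked "bad", as in the WARN lines above.
public class PipelineRecoveryConfigSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Allow the client to request a replacement datanode from the NameNode
        // instead of only continuing with the shrunken pipeline.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only adds a replacement for larger pipelines; ALWAYS and NEVER
        // are the other supported policies.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // If no replacement can be found, keep writing with the surviving datanodes
        // rather than failing the stream outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        System.out.println("replace-datanode policy = "
                + conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
    }
}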
2024-12-02T21:28:56,136 WARN [ResponseProcessor for block BP-510899863-172.17.0.3-1733174918669:blk_1073741836_1012 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-510899863-172.17.0.3-1733174918669:blk_1073741836_1012 java.io.IOException: Bad response ERROR for BP-510899863-172.17.0.3-1733174918669:blk_1073741836_1012 from datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:56,136 WARN [DataStreamer for file /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 block BP-510899863-172.17.0.3-1733174918669:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK], DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:28:56,137 WARN [ResponseProcessor for block BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:56,136 WARN [PacketResponder: BP-510899863-172.17.0.3-1733174918669:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37751] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,138 WARN [DataStreamer for file /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta block BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:28:56,138 WARN [DataStreamer for file /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 block BP-510899863-172.17.0.3-1733174918669:blk_1073741836_1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741836_1012 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:28:56,138 WARN [PacketResponder: BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37751] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:28:56,138 WARN [PacketResponder: BP-510899863-172.17.0.3-1733174918669:blk_1073741836_1012, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37751] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,136 WARN [DataStreamer for file /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 block BP-510899863-172.17.0.3-1733174918669:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:28:56,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:38768 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38768 dst: /127.0.0.1:40921 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-722415833_22 at /127.0.0.1:38780 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741836_1012] {}] datanode.DataXceiver(331): 127.0.0.1:40921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38780 dst: /127.0.0.1:40921 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:42338 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42338 dst: /127.0.0.1:37751 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2091446860_22 at /127.0.0.1:37070 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37070 dst: /127.0.0.1:37751 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:42342 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42342 dst: /127.0.0.1:37751 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2091446860_22 at /127.0.0.1:47994 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47994 dst: /127.0.0.1:40921 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:38758 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38758 dst: /127.0.0.1:40921 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-722415833_22 at /127.0.0.1:42358 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741836_1012] {}] datanode.DataXceiver(331): 127.0.0.1:37751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42358 dst: /127.0.0.1:37751 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,143 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@272348fe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:56,143 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10b53169{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:28:56,143 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:28:56,143 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bb19ef9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:28:56,143 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32403ac6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,STOPPED} 2024-12-02T21:28:56,145 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:28:56,145 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
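Annotation: the stopped Jetty contexts, the interrupted Command processor, and the "Ending block pool service" lines here are a datanode being taken down deliberately, which is the failure this test (testLogRollOnDatanodeDeath) exercises. The following is a hypothetical sketch of how such a datanode death is usually injected with Hadoop's MiniDFSCluster test utility (the hadoop-hdfs-3.4.1-tests.jar references above indicate that utility is in use); the cluster sizing and surrounding code are assumptions for illustration, not the actual test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Illustrative sketch only (assumption: the hadoop-hdfs test artifact is on the classpath).
// It shows the usual way a test simulates "datanode death": stop one datanode while the
// rest of the mini cluster keeps running.
public class DatanodeDeathSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .numDataNodes(3)
                .build();
        try {
            cluster.waitActive();
            // Stop the first datanode; in-flight write pipelines through it start
            // reporting "datanode ... is bad", as seen in the log above.
            MiniDFSCluster.DataNodeProperties stopped = cluster.stopDataNode(0);
            // ... exercise writes here, then optionally bring the node back:
            cluster.restartDataNode(stopped, true);
        } finally {
            cluster.shutdown();
        }
    }
}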
2024-12-02T21:28:56,145 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:28:56,145 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-510899863-172.17.0.3-1733174918669 (Datanode Uuid e7491dd4-eed2-463b-a465-b069e6a1f484) service to localhost/127.0.0.1:43877 2024-12-02T21:28:56,146 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data3/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:56,146 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data4/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:56,146 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:28:56,146 WARN [DataStreamer for file /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 block BP-510899863-172.17.0.3-1733174918669:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:28:56,146 WARN [DataStreamer for file /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta block BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:56,148 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@38cdedad {}] datanode.DataXceiver(331): 127.0.0.1:40921:DataXceiver error processing unknown operation src: /127.0.0.1:49240 dst: /127.0.0.1:40921 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-722415833_22 at /127.0.0.1:49238 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741836_1012] {}] datanode.DataXceiver(331): 127.0.0.1:40921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49238 dst: /127.0.0.1:40921 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:56,148 WARN [DataStreamer for file /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 block BP-510899863-172.17.0.3-1733174918669:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:56,149 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5538b075{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:56,149 WARN [ResponseProcessor for block BP-510899863-172.17.0.3-1733174918669:blk_1073741836_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-510899863-172.17.0.3-1733174918669:blk_1073741836_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:56,150 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f87a993{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:28:56,150 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:28:56,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bb5d847{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:28:56,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bf32f74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,STOPPED} 2024-12-02T21:28:56,151 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:28:56,151 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T21:28:56,151 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:28:56,151 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-510899863-172.17.0.3-1733174918669 (Datanode Uuid 77dd7d53-78d4-48ee-bed7-6e78c79c324a) service to localhost/127.0.0.1:43877 2024-12-02T21:28:56,152 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data1/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:56,152 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data2/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:56,152 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:28:56,155 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224., hostname=87c3fdb6c570,42545,1733174921025, seqNum=2] 2024-12-02T21:28:56,156 ERROR [FSHLog-0-hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db-prefix:87c3fdb6c570,42545,1733174921025 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:56,156 WARN [FSHLog-0-hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db-prefix:87c3fdb6c570,42545,1733174921025 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:56,156 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:28:56,157 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C42545%2C1733174921025:(num 1733174922705) roll requested 2024-12-02T21:28:56,157 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42545%2C1733174921025.1733174936157 2024-12-02T21:28:56,162 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:56,162 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:56,162 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:56,162 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:56,163 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:28:56,163 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174936157 2024-12-02T21:28:56,163 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:56,163 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:28:56,163 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42829:42829),(127.0.0.1/127.0.0.1:35447:35447)] 2024-12-02T21:28:56,163 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 is not closed yet, will try archiving it next time 2024-12-02T21:28:56,164 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-02T21:28:56,164 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-02T21:28:56,165 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 2024-12-02T21:28:56,167 WARN [IPC Server handler 4 on default port 43877 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-02T21:28:56,170 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 after 4ms 2024-12-02T21:28:57,146 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:57,296 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:28:58,164 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:58,165 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174936157 2024-12-02T21:28:58,167 WARN [ResponseProcessor for block BP-510899863-172.17.0.3-1733174918669:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-510899863-172.17.0.3-1733174918669:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:58,168 WARN [DataStreamer for file /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174936157 block BP-510899863-172.17.0.3-1733174918669:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK], DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:28:58,169 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:45014 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:34535:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45014 dst: /127.0.0.1:34535 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:28:58,170 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:45820 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33759:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45820 dst: /127.0.0.1:33759 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:28:58,204 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d82aaea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:28:58,205 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3904e150{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:28:58,206 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:28:58,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64b89ed5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:28:58,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f85c2b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,STOPPED} 2024-12-02T21:28:58,208 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:28:58,208 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-510899863-172.17.0.3-1733174918669 (Datanode Uuid 00d9fdea-3cc3-4e42-bda5-ec1334920e20) service to localhost/127.0.0.1:43877 2024-12-02T21:28:58,208 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T21:28:58,208 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:28:58,209 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data7/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:58,209 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data8/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:28:58,209 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:28:59,147 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:28:59,296 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:00,165 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:00,166 WARN [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]] 2024-12-02T21:29:00,167 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C42545%2C1733174921025:(num 1733174936157) roll requested 2024-12-02T21:29:00,168 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42545%2C1733174921025.1733174940167 2024-12-02T21:29:00,172 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 after 4007ms 2024-12-02T21:29:00,176 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:00,176 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:45832 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data10]'}, localName='127.0.0.1:33759', datanodeUuid='35b544fa-31f8-43d3-ba5c-02e71c9566d1', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741839_1021 to mirror 127.0.0.1:34535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:00,176 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK], DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:00,176 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741839_1021 2024-12-02T21:29:00,176 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:45832 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-02T21:29:00,177 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:45832 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33759:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45832 dst: /127.0.0.1:33759 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:00,179 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:00,185 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40921 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:00,185 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:45740 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741840_1022 to mirror 127.0.0.1:40921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:00,185 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 2024-12-02T21:29:00,185 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741840_1022 2024-12-02T21:29:00,185 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:45740 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-02T21:29:00,186 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:45740 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45740 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:00,186 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:00,188 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:00,188 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:29:00,188 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741841_1023 2024-12-02T21:29:00,189 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] 2024-12-02T21:29:00,193 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:00,193 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:00,193 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:00,193 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:00,194 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:00,194 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174936157 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174940167 2024-12-02T21:29:00,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33759 is added to blk_1073741838_1020 (size=2431) 2024-12-02T21:29:00,196 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35447:35447),(127.0.0.1/127.0.0.1:40977:40977)] 2024-12-02T21:29:00,196 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 is not closed yet, will try archiving it next time 2024-12-02T21:29:00,196 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174936157 is not closed yet, will try archiving it next time 2024-12-02T21:29:00,215 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T21:29:00,598 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 is not closed yet, will try archiving it next time 2024-12-02T21:29:01,081 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3ae3e219[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33759, datanodeUuid=35b544fa-31f8-43d3-ba5c-02e71c9566d1, infoPort=35447, infoSecurePort=0, 
ipcPort=41719, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669):Failed to transfer BP-510899863-172.17.0.3-1733174918669:blk_1073741838_1020 to 127.0.0.1:40921 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:01,148 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:01,297 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:02,197 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:02,219 WARN [ResponseProcessor for block BP-510899863-172.17.0.3-1733174918669:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-510899863-172.17.0.3-1733174918669:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:02,220 WARN [DataStreamer for file /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174940167 block BP-510899863-172.17.0.3-1733174918669:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 2024-12-02T21:29:02,221 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:45844 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33759:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45844 dst: /127.0.0.1:33759 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:02,222 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:45752 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45752 dst: /127.0.0.1:35729 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:02,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@20faceaa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:02,269 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e1ad43e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:29:02,269 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:29:02,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47432b7b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:29:02,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b21f544{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,STOPPED} 2024-12-02T21:29:02,271 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:29:02,271 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:29:02,271 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-510899863-172.17.0.3-1733174918669 (Datanode Uuid 35b544fa-31f8-43d3-ba5c-02e71c9566d1) service to localhost/127.0.0.1:43877 2024-12-02T21:29:02,271 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:29:02,272 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data9/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:02,272 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data10/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:02,272 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:29:02,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42545 {}] regionserver.HRegion(8855): Flush requested on b0c5cfcfb485c924481201897ff11224 2024-12-02T21:29:02,283 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0c5cfcfb485c924481201897ff11224 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:29:02,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/d5c584e278cf4798a21dad256288f4e3 is 1080, key is row0002/info:/1733174938211/Put/seqid=0 2024-12-02T21:29:02,308 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37751 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:02,308 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57358 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741843_1026 to mirror 127.0.0.1:37751 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:02,308 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:29:02,308 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741843_1026 2024-12-02T21:29:02,308 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57358 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:02,308 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57358 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57358 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:02,309 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] 2024-12-02T21:29:02,310 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:02,310 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK], DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 2024-12-02T21:29:02,310 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741844_1027 2024-12-02T21:29:02,310 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:02,311 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:02,311 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK], DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:02,311 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741845_1028 2024-12-02T21:29:02,312 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:02,313 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:02,313 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 
2024-12-02T21:29:02,313 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741846_1029 2024-12-02T21:29:02,314 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:02,314 WARN [IPC Server handler 4 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:29:02,314 WARN [IPC Server handler 4 on default port 43877 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:29:02,315 WARN [IPC Server handler 4 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:29:02,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741847_1030 (size=10347) 2024-12-02T21:29:02,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/d5c584e278cf4798a21dad256288f4e3 2024-12-02T21:29:02,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/d5c584e278cf4798a21dad256288f4e3 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d5c584e278cf4798a21dad256288f4e3 2024-12-02T21:29:02,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d5c584e278cf4798a21dad256288f4e3, entries=5, sequenceid=11, filesize=10.1 K 2024-12-02T21:29:02,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for b0c5cfcfb485c924481201897ff11224 in 459ms, sequenceid=11, compaction requested=false 2024-12-02T21:29:02,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
b0c5cfcfb485c924481201897ff11224: 2024-12-02T21:29:02,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42545 {}] regionserver.HRegion(8855): Flush requested on b0c5cfcfb485c924481201897ff11224 2024-12-02T21:29:02,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0c5cfcfb485c924481201897ff11224 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-02T21:29:02,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/d29dc6d2b80f44eaaf9999f60d118cb0 is 1080, key is row0007/info:/1733174942284/Put/seqid=0 2024-12-02T21:29:02,925 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33759 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:02,925 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57380 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741848_1031 to mirror 127.0.0.1:33759 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:02,925 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 2024-12-02T21:29:02,925 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741848_1031 2024-12-02T21:29:02,925 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57380 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:02,925 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57380 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57380 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:02,926 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:02,927 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:02,927 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:29:02,927 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741849_1032 2024-12-02T21:29:02,927 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] 2024-12-02T21:29:02,929 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:02,929 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:02,929 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741850_1033 2024-12-02T21:29:02,929 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:02,930 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:02,931 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 2024-12-02T21:29:02,931 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741851_1034 2024-12-02T21:29:02,931 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:02,932 WARN [IPC Server handler 3 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:29:02,932 WARN [IPC Server handler 3 on default port 43877 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:29:02,932 WARN [IPC Server handler 3 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:29:02,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741852_1035 (size=12506) 2024-12-02T21:29:03,148 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:03,298 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:03,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/d29dc6d2b80f44eaaf9999f60d118cb0 2024-12-02T21:29:03,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/d29dc6d2b80f44eaaf9999f60d118cb0 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d29dc6d2b80f44eaaf9999f60d118cb0 2024-12-02T21:29:03,360 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d29dc6d2b80f44eaaf9999f60d118cb0, entries=7, sequenceid=24, filesize=12.2 K 2024-12-02T21:29:03,361 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for b0c5cfcfb485c924481201897ff11224 in 444ms, sequenceid=24, compaction requested=false 2024-12-02T21:29:03,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0c5cfcfb485c924481201897ff11224: 2024-12-02T21:29:03,361 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-02T21:29:03,361 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:29:03,361 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d29dc6d2b80f44eaaf9999f60d118cb0 because midkey is the same as first or last row 2024-12-02T21:29:03,890 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7eb0943f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669):Failed to transfer BP-510899863-172.17.0.3-1733174918669:blk_1073741847_1030 to 127.0.0.1:37751 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:03,890 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@70776599[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669):Failed to transfer BP-510899863-172.17.0.3-1733174918669:blk_1073741852_1035 to 127.0.0.1:34535 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:04,198 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:04,198 WARN [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]] 2024-12-02T21:29:04,198 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C42545%2C1733174921025:(num 1733174940167) roll requested 2024-12-02T21:29:04,199 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42545%2C1733174921025.1733174944198 2024-12-02T21:29:04,204 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:04,205 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK], DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:29:04,205 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741853_1036 2024-12-02T21:29:04,206 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] 2024-12-02T21:29:04,208 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:04,208 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 2024-12-02T21:29:04,209 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741854_1037 2024-12-02T21:29:04,209 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:04,211 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:04,211 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 2024-12-02T21:29:04,211 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741855_1038 2024-12-02T21:29:04,212 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:04,214 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:04,214 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:04,214 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741856_1039 2024-12-02T21:29:04,215 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:04,215 WARN [IPC Server handler 1 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:29:04,215 WARN [IPC Server handler 1 on default port 43877 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:29:04,216 WARN [IPC Server handler 1 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:29:04,218 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:04,218 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:04,218 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:04,219 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:04,219 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:04,219 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174940167 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174944198 2024-12-02T21:29:04,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741842_1025 (size=25992) 2024-12-02T21:29:04,221 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40977:40977)] 2024-12-02T21:29:04,222 DEBUG 
[regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 is not closed yet, will try archiving it next time 2024-12-02T21:29:04,222 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174940167 is not closed yet, will try archiving it next time 2024-12-02T21:29:04,222 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174936157 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs/87c3fdb6c570%2C42545%2C1733174921025.1733174936157 2024-12-02T21:29:04,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42545 {}] regionserver.HRegion(8855): Flush requested on b0c5cfcfb485c924481201897ff11224 2024-12-02T21:29:04,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0c5cfcfb485c924481201897ff11224 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-02T21:29:04,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/26cd86aca5304a539c66c8180a46d50a is 1079, key is tmprow/info:/1733174944351/Put/seqid=0 2024-12-02T21:29:04,361 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33759 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:04,361 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57404 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741858_1041 to mirror 127.0.0.1:33759 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:04,362 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 2024-12-02T21:29:04,362 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741858_1041 2024-12-02T21:29:04,362 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57404 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:04,362 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57404 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57404 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:04,362 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:04,364 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37751 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:04,364 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57416 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741859_1042 to mirror 127.0.0.1:37751 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:04,364 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:29:04,364 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741859_1042 2024-12-02T21:29:04,364 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57416 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:04,364 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57416 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57416 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:04,365 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] 2024-12-02T21:29:04,367 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:04,367 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57428 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741860_1043 to mirror 127.0.0.1:34535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:04,367 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:04,367 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741860_1043 2024-12-02T21:29:04,367 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57428 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:04,368 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57428 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57428 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:04,368 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:04,370 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:04,370 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 
2024-12-02T21:29:04,370 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741861_1044 2024-12-02T21:29:04,371 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:04,372 WARN [IPC Server handler 2 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:29:04,372 WARN [IPC Server handler 2 on default port 43877 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:29:04,372 WARN [IPC Server handler 2 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:29:04,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741862_1045 (size=6027) 2024-12-02T21:29:04,623 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 is not closed yet, will try archiving it next time 2024-12-02T21:29:04,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/26cd86aca5304a539c66c8180a46d50a 2024-12-02T21:29:04,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/26cd86aca5304a539c66c8180a46d50a as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/26cd86aca5304a539c66c8180a46d50a 2024-12-02T21:29:04,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/26cd86aca5304a539c66c8180a46d50a, entries=1, sequenceid=34, filesize=5.9 K 2024-12-02T21:29:04,795 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): 
Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b0c5cfcfb485c924481201897ff11224 in 440ms, sequenceid=34, compaction requested=true 2024-12-02T21:29:04,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0c5cfcfb485c924481201897ff11224: 2024-12-02T21:29:04,795 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-02T21:29:04,795 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:29:04,795 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d29dc6d2b80f44eaaf9999f60d118cb0 because midkey is the same as first or last row 2024-12-02T21:29:04,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b0c5cfcfb485c924481201897ff11224:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:29:04,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:29:04,795 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:29:04,797 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:29:04,797 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HStore(1541): b0c5cfcfb485c924481201897ff11224/info is initiating minor compaction (all files) 2024-12-02T21:29:04,797 INFO [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b0c5cfcfb485c924481201897ff11224/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 
2024-12-02T21:29:04,797 INFO [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d5c584e278cf4798a21dad256288f4e3, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d29dc6d2b80f44eaaf9999f60d118cb0, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/26cd86aca5304a539c66c8180a46d50a] into tmpdir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp, totalSize=28.2 K 2024-12-02T21:29:04,798 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] compactions.Compactor(225): Compacting d5c584e278cf4798a21dad256288f4e3, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733174938211 2024-12-02T21:29:04,798 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] compactions.Compactor(225): Compacting d29dc6d2b80f44eaaf9999f60d118cb0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733174942284 2024-12-02T21:29:04,799 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] compactions.Compactor(225): Compacting 26cd86aca5304a539c66c8180a46d50a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733174944351 2024-12-02T21:29:04,812 INFO [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b0c5cfcfb485c924481201897ff11224#info#compaction#21 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:29:04,812 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/f04ef4c298fa4c1a9a43c6129ae624c6 is 1080, key is row0002/info:/1733174938211/Put/seqid=0 2024-12-02T21:29:04,815 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33759 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:04,815 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57464 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741863_1046 to mirror 127.0.0.1:33759 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:04,815 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 2024-12-02T21:29:04,815 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57464 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:04,815 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741863_1046 2024-12-02T21:29:04,815 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57464 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57464 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:04,816 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:04,817 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:04,817 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57478 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741864_1047 to mirror 127.0.0.1:34535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:04,818 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:04,818 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741864_1047 2024-12-02T21:29:04,818 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57478 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:04,818 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57478 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57478 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:04,818 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:04,819 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:04,819 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 2024-12-02T21:29:04,819 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741865_1048 2024-12-02T21:29:04,820 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:04,822 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37751 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:04,822 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57486 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741866_1049 to mirror 127.0.0.1:37751 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:04,822 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:29:04,822 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741866_1049 2024-12-02T21:29:04,822 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57486 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:04,822 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57486 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57486 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:04,822 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] 2024-12-02T21:29:04,823 WARN [IPC Server handler 3 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:29:04,823 WARN [IPC Server handler 3 on default port 43877 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:29:04,823 WARN [IPC Server handler 3 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:29:04,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741867_1050 (size=17994) 2024-12-02T21:29:05,149 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:05,240 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/f04ef4c298fa4c1a9a43c6129ae624c6 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/f04ef4c298fa4c1a9a43c6129ae624c6 2024-12-02T21:29:05,249 INFO [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b0c5cfcfb485c924481201897ff11224/info of b0c5cfcfb485c924481201897ff11224 into f04ef4c298fa4c1a9a43c6129ae624c6(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b0c5cfcfb485c924481201897ff11224: 2024-12-02T21:29:05,249 INFO [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224., storeName=b0c5cfcfb485c924481201897ff11224/info, priority=13, startTime=1733174944795; duration=0sec 2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/f04ef4c298fa4c1a9a43c6129ae624c6 because midkey is the same as first or last row 2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/f04ef4c298fa4c1a9a43c6129ae624c6 because midkey is the same as first or last row 2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/f04ef4c298fa4c1a9a43c6129ae624c6 because midkey is the same as first or last row 2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:29:05,249 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b0c5cfcfb485c924481201897ff11224:info 2024-12-02T21:29:05,298 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:05,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42545 {}] regionserver.HRegion(8855): Flush requested on b0c5cfcfb485c924481201897ff11224 2024-12-02T21:29:05,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0c5cfcfb485c924481201897ff11224 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-02T21:29:05,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/7b68887c50f4422b9b1ac4a35a77b003 is 1079, key is tmprow/info:/1733174945791/Put/seqid=0 2024-12-02T21:29:05,804 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:05,804 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK], DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:29:05,804 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741868_1051 2024-12-02T21:29:05,805 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] 2024-12-02T21:29:05,806 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:05,806 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK], DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 2024-12-02T21:29:05,806 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741869_1052 2024-12-02T21:29:05,807 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:05,809 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57494 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741870_1053 to mirror 127.0.0.1:40921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:05,809 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40921 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:05,810 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 2024-12-02T21:29:05,810 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57494 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:05,810 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741870_1053 2024-12-02T21:29:05,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57494 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57494 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:05,810 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:05,812 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:05,812 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57504 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741871_1054 to mirror 127.0.0.1:34535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:05,812 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:05,812 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57504 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:05,812 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741871_1054 2024-12-02T21:29:05,813 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57504 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57504 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:05,813 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:05,814 WARN [IPC Server handler 0 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:29:05,814 WARN [IPC Server handler 0 on default port 43877 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:29:05,814 WARN [IPC Server handler 0 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:29:05,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741872_1055 (size=6027) 2024-12-02T21:29:06,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/7b68887c50f4422b9b1ac4a35a77b003 2024-12-02T21:29:06,222 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:06,222 WARN [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]] 2024-12-02T21:29:06,223 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C42545%2C1733174921025:(num 1733174944198) roll requested 2024-12-02T21:29:06,223 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42545%2C1733174921025.1733174946223 2024-12-02T21:29:06,227 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:06,228 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 
2024-12-02T21:29:06,228 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741873_1056 2024-12-02T21:29:06,229 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:06,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/7b68887c50f4422b9b1ac4a35a77b003 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/7b68887c50f4422b9b1ac4a35a77b003 2024-12-02T21:29:06,230 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:06,231 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:29:06,231 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741874_1057 2024-12-02T21:29:06,231 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] 2024-12-02T21:29:06,234 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40921 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:06,234 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 2024-12-02T21:29:06,234 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57520 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741875_1058 to mirror 127.0.0.1:40921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:06,234 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741875_1058 2024-12-02T21:29:06,234 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57520 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-02T21:29:06,234 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57520 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57520 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:06,235 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:06,236 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:06,236 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 
2024-12-02T21:29:06,236 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741876_1059 2024-12-02T21:29:06,237 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:06,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/7b68887c50f4422b9b1ac4a35a77b003, entries=1, sequenceid=45, filesize=5.9 K 2024-12-02T21:29:06,238 WARN [IPC Server handler 0 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:29:06,238 WARN [IPC Server handler 0 on default port 43877 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:29:06,238 WARN [IPC Server handler 0 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:29:06,239 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b0c5cfcfb485c924481201897ff11224 in 446ms, sequenceid=45, compaction requested=false 2024-12-02T21:29:06,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0c5cfcfb485c924481201897ff11224: 2024-12-02T21:29:06,239 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-02T21:29:06,239 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:29:06,239 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/f04ef4c298fa4c1a9a43c6129ae624c6 because midkey is the same as first or last row 2024-12-02T21:29:06,241 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:06,241 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:06,241 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:06,241 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:06,241 INFO [sync.4 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:06,241 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174944198 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174946223 2024-12-02T21:29:06,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741857_1040 (size=13591) 2024-12-02T21:29:06,247 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40977:40977)] 2024-12-02T21:29:06,247 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 is not closed yet, will try archiving it next time 2024-12-02T21:29:06,247 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174944198 is not closed yet, will try archiving it next time 2024-12-02T21:29:06,247 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174940167 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs/87c3fdb6c570%2C42545%2C1733174921025.1733174940167 2024-12-02T21:29:06,645 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 is not closed yet, will try archiving it next time 2024-12-02T21:29:06,889 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7eb0943f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669):Failed to transfer BP-510899863-172.17.0.3-1733174918669:blk_1073741862_1045 to 127.0.0.1:34535 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
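The log roller above has just rolled the WAL to a new file and started archiving older ones. For reference only, a WAL roll can also be requested explicitly through the HBase Admin API; this is a hedged sketch rather than the test's own code, and while the server-name string mirrors the region server seen in this log, the class name and setup around it are assumptions of the illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // ServerName uses the host,port,startcode form; this one is copied from the log
            // for illustration and would differ on any other cluster.
            ServerName rs = ServerName.valueOf("87c3fdb6c570,42545,1733174921025");
            admin.rollWALWriter(rs); // asks that region server to roll its write-ahead log
        }
    }
}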
2024-12-02T21:29:06,889 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@70776599[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669):Failed to transfer BP-510899863-172.17.0.3-1733174918669:blk_1073741842_1025 to 127.0.0.1:40921 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:07,149 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:07,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42545 {}] regionserver.HRegion(8855): Flush requested on b0c5cfcfb485c924481201897ff11224 2024-12-02T21:29:07,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0c5cfcfb485c924481201897ff11224 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-02T21:29:07,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/d90a8aa7ad2f4f8e9a853af834270137 is 1079, key is tmprow/info:/1733174947230/Put/seqid=0 2024-12-02T21:29:07,245 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:07,246 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK], DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:29:07,246 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741878_1061 2024-12-02T21:29:07,246 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] 2024-12-02T21:29:07,247 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:07,247 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:07,247 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741879_1062 2024-12-02T21:29:07,248 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:07,249 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33759 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:07,249 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57544 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741880_1063 to mirror 127.0.0.1:33759 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:07,250 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 2024-12-02T21:29:07,250 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741880_1063 2024-12-02T21:29:07,250 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57544 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:07,250 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57544 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57544 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:07,250 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:07,252 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40921 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:07,252 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57558 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741881_1064 to mirror 127.0.0.1:40921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:07,252 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 2024-12-02T21:29:07,253 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741881_1064 2024-12-02T21:29:07,253 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57558 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:07,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57558 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57558 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:07,253 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:07,254 WARN [IPC Server handler 0 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:29:07,254 WARN [IPC Server handler 0 on default port 43877 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:29:07,254 WARN [IPC Server handler 0 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:29:07,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741882_1065 (size=6027) 2024-12-02T21:29:07,299 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
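The BlockPlacementPolicyDefault warnings above mean the NameNode cannot find enough live DISK storages to reach replication=2 while most datanodes are down. One way to inspect which datanodes still report replicas for a given file is the FileSystem block-location API; a small sketch follows, with a hypothetical path standing in for any HFile or WAL file from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.net.URI;

public class ReplicaLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43877"), conf)) {
            // Hypothetical file; substitute a real path from the cluster being inspected.
            Path p = new Path("/user/jenkins/some-file");
            FileStatus st = fs.getFileStatus(p);
            for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
                // Prints, per block, the datanodes (host:port) that currently report a replica.
                System.out.println(loc.getOffset() + " -> " + String.join(",", loc.getNames()));
            }
        }
    }
}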
2024-12-02T21:29:07,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/d90a8aa7ad2f4f8e9a853af834270137 2024-12-02T21:29:07,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/d90a8aa7ad2f4f8e9a853af834270137 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d90a8aa7ad2f4f8e9a853af834270137 2024-12-02T21:29:07,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d90a8aa7ad2f4f8e9a853af834270137, entries=1, sequenceid=55, filesize=5.9 K 2024-12-02T21:29:07,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b0c5cfcfb485c924481201897ff11224 in 447ms, sequenceid=55, compaction requested=true 2024-12-02T21:29:07,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0c5cfcfb485c924481201897ff11224: 2024-12-02T21:29:07,681 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-12-02T21:29:07,681 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:29:07,681 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/f04ef4c298fa4c1a9a43c6129ae624c6 because midkey is the same as first or last row 2024-12-02T21:29:07,681 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b0c5cfcfb485c924481201897ff11224:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:29:07,681 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:29:07,681 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:29:07,683 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:29:07,683 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HStore(1541): b0c5cfcfb485c924481201897ff11224/info is initiating minor compaction (all files) 2024-12-02T21:29:07,683 INFO [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
b0c5cfcfb485c924481201897ff11224/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:29:07,683 INFO [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/f04ef4c298fa4c1a9a43c6129ae624c6, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/7b68887c50f4422b9b1ac4a35a77b003, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d90a8aa7ad2f4f8e9a853af834270137] into tmpdir=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp, totalSize=29.3 K 2024-12-02T21:29:07,683 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] compactions.Compactor(225): Compacting f04ef4c298fa4c1a9a43c6129ae624c6, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733174938211 2024-12-02T21:29:07,684 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7b68887c50f4422b9b1ac4a35a77b003, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733174945791 2024-12-02T21:29:07,684 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] compactions.Compactor(225): Compacting d90a8aa7ad2f4f8e9a853af834270137, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733174947230 2024-12-02T21:29:07,699 INFO [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b0c5cfcfb485c924481201897ff11224#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:29:07,699 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/e3ddf210ab794852b3ee6b7e5ffbb164 is 1080, key is row0002/info:/1733174938211/Put/seqid=0 2024-12-02T21:29:07,702 WARN [Thread-979 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:07,702 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57572 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741883_1066 to mirror 127.0.0.1:34535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:07,702 WARN [Thread-979 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:07,702 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57572 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:07,702 WARN [Thread-979 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741883_1066 2024-12-02T21:29:07,702 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57572 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57572 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:07,703 WARN [Thread-979 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:07,704 WARN [Thread-979 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:07,704 WARN [Thread-979 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 2024-12-02T21:29:07,704 WARN [Thread-979 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741884_1067 2024-12-02T21:29:07,705 WARN [Thread-979 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:07,707 WARN [Thread-979 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37751 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:07,707 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57588 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741885_1068 to mirror 127.0.0.1:37751 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:07,708 WARN [Thread-979 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]) is bad. 2024-12-02T21:29:07,708 WARN [Thread-979 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741885_1068 2024-12-02T21:29:07,708 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57588 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:07,708 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57588 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57588 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:07,708 WARN [Thread-979 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37751,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK] 2024-12-02T21:29:07,711 WARN [Thread-979 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33759 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:07,711 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57600 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741886_1069 to mirror 127.0.0.1:33759 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:07,711 WARN [Thread-979 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 2024-12-02T21:29:07,711 WARN [Thread-979 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741886_1069 2024-12-02T21:29:07,711 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57600 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:07,711 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:57600 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57600 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:07,712 WARN [Thread-979 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:07,712 WARN [IPC Server handler 0 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:29:07,713 WARN [IPC Server handler 0 on default port 43877 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:29:07,713 WARN [IPC Server handler 0 on default port 43877 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:29:07,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741887_1070 (size=18097) 2024-12-02T21:29:07,886 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7eb0943f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669):Failed to transfer BP-510899863-172.17.0.3-1733174918669:blk_1073741872_1055 to 127.0.0.1:37751 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:07,886 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@70776599[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669):Failed to transfer BP-510899863-172.17.0.3-1733174918669:blk_1073741867_1050 to 127.0.0.1:33759 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:08,130 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/e3ddf210ab794852b3ee6b7e5ffbb164 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/e3ddf210ab794852b3ee6b7e5ffbb164 2024-12-02T21:29:08,138 INFO [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b0c5cfcfb485c924481201897ff11224/info of b0c5cfcfb485c924481201897ff11224 into e3ddf210ab794852b3ee6b7e5ffbb164(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
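Above, the short-compactions thread selects all three info store files (about 29.3 K in total) and rewrites them into a single file of about 17.7 K. For completeness, a compaction can also be requested explicitly through the Admin API; the sketch below is hedged, with the table name taken from this log and everything else assumed for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionRequestSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
            admin.compact(table);        // queue a minor compaction for every region of the table
            // admin.majorCompact(table); // or force a full rewrite of all store files
        }
    }
}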
2024-12-02T21:29:08,138 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b0c5cfcfb485c924481201897ff11224: 2024-12-02T21:29:08,138 INFO [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224., storeName=b0c5cfcfb485c924481201897ff11224/info, priority=13, startTime=1733174947681; duration=0sec 2024-12-02T21:29:08,138 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-02T21:29:08,138 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:29:08,138 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/e3ddf210ab794852b3ee6b7e5ffbb164 because midkey is the same as first or last row 2024-12-02T21:29:08,138 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-02T21:29:08,138 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:29:08,138 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/e3ddf210ab794852b3ee6b7e5ffbb164 because midkey is the same as first or last row 2024-12-02T21:29:08,138 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-02T21:29:08,139 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:29:08,139 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/e3ddf210ab794852b3ee6b7e5ffbb164 because midkey is the same as first or last row 2024-12-02T21:29:08,139 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:29:08,139 DEBUG [RS:0;87c3fdb6c570:42545-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b0c5cfcfb485c924481201897ff11224:info 2024-12-02T21:29:08,247 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:08,248 WARN [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-02T21:29:08,271 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:29:08,274 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:29:08,275 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:29:08,275 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:29:08,275 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:29:08,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d6a3337{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:29:08,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a8eeeb7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:29:08,367 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@122a196d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/java.io.tmpdir/jetty-localhost-37359-hadoop-hdfs-3_4_1-tests_jar-_-any-1872482003398214163/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:08,367 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a528add{HTTP/1.1, (http/1.1)}{localhost:37359} 2024-12-02T21:29:08,367 INFO [Time-limited test {}] server.Server(415): Started @138359ms 2024-12-02T21:29:08,368 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:29:08,801 WARN [Thread-1000 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:29:08,809 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x474a390252435d16 with lease ID 0x7c59dd479eb1ace4: from storage DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955 node DatanodeRegistration(127.0.0.1:36605, datanodeUuid=e7491dd4-eed2-463b-a465-b069e6a1f484, infoPort=44145, infoSecurePort=0, ipcPort=44491, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 6, hasStaleStorage: false, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-02T21:29:08,809 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x474a390252435d16 with lease ID 0x7c59dd479eb1ace4: from storage DS-65ec6835-25dd-4cc8-8a09-1d7a610e306e node DatanodeRegistration(127.0.0.1:36605, datanodeUuid=e7491dd4-eed2-463b-a465-b069e6a1f484, infoPort=44145, infoSecurePort=0, ipcPort=44491, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:09,150 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:09,299 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:09,890 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7eb0943f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669):Failed to transfer BP-510899863-172.17.0.3-1733174918669:blk_1073741882_1065 to 127.0.0.1:34535 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:09,890 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@70776599[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35729, datanodeUuid=15fbf574-b6f1-45db-9826-ff7d28c3d9b5, infoPort=40977, infoSecurePort=0, ipcPort=38857, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669):Failed to transfer BP-510899863-172.17.0.3-1733174918669:blk_1073741857_1040 to 127.0.0.1:33759 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:10,248 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:10,838 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T21:29:10,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741887_1070 (size=18097) 2024-12-02T21:29:11,150 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:11,300 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:12,249 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:12,368 ERROR [FSHLog-0-hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData-prefix:87c3fdb6c570,35361,1733174920862 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:12,368 WARN [FSHLog-0-hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData-prefix:87c3fdb6c570,35361,1733174920862 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:12,369 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C35361%2C1733174920862:(num 1733174921169) roll requested 2024-12-02T21:29:12,369 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C35361%2C1733174920862.1733174952369 2024-12-02T21:29:12,379 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:12,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:12,379 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:12,379 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:12,379 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:12,379 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862/87c3fdb6c570%2C35361%2C1733174920862.1733174952369 2024-12-02T21:29:12,380 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:12,380 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
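The repeated "All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,...]] are bad. Aborting..." traces come from the HDFS client's pipeline recovery: every datanode still listed in the write pipeline for that WAL block has failed, so the DataStreamer gives up and the WAL roller keeps retrying. How willing the client is to substitute replacement datanodes into a failing pipeline is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings; the sketch below only shows how they would be set on a client Configuration, with illustrative values (small test clusters like this one usually keep the defaults).

    import org.apache.hadoop.conf.Configuration;

    public final class PipelineRecoverySettingsSketch {
        static Configuration clientConf() {
            Configuration conf = new Configuration();
            // Ask the namenode for a replacement datanode when a pipeline node dies mid-write...
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // ...and if no replacement can be found, keep writing to whatever nodes remain
            // instead of aborting the stream outright.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return conf;
        }
    }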
2024-12-02T21:29:12,380 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 2024-12-02T21:29:12,380 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44145:44145),(127.0.0.1/127.0.0.1:40977:40977)] 2024-12-02T21:29:12,380 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 is not closed yet, will try archiving it next time 2024-12-02T21:29:12,381 WARN [IPC Server handler 4 on default port 43877 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741830_1006 2024-12-02T21:29:12,381 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 after 1ms 2024-12-02T21:29:13,300 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:14,250 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:15,301 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:16,250 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:16,384 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 after 4004ms 2024-12-02T21:29:17,302 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:18,251 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
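The RecoverLeaseFSUtils records above (attempt=0 after 1ms, then attempt=1 after 4004ms, with the namenode replying that lease recovery is in progress for blk_1073741830_1006) show HBase reclaiming the lease on the old master WAL so it can be read safely. The primitive underneath is DistributedFileSystem.recoverLease(); here is a minimal sketch of such a polling loop, assuming fs.defaultFS points at this cluster. It is not the HBase utility itself, and the 4-second sleep simply mirrors the gap between the two attempts above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
        /** Polls recoverLease() until the namenode reports the file closed. */
        static void recoverLease(Configuration conf, Path walFile) throws Exception {
            DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
            boolean closed = dfs.recoverLease(walFile);   // attempt=0
            while (!closed) {
                Thread.sleep(4000L);                      // give block recovery time to finish
                closed = dfs.recoverLease(walFile);       // attempt=1, 2, ...
            }
        }
    }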
2024-12-02T21:29:18,827 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@110425ab {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-510899863-172.17.0.3-1733174918669:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:40921,null,null]) java.net.ConnectException: Call From 87c3fdb6c570/172.17.0.3 to localhost:39023 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-02T21:29:18,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741833_1019 (size=455) 2024-12-02T21:29:19,204 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs/87c3fdb6c570%2C42545%2C1733174921025.1733174922705 2024-12-02T21:29:19,207 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174944198 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs/87c3fdb6c570%2C42545%2C1733174921025.1733174944198 2024-12-02T21:29:19,303 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:20,252 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:21,303 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:21,809 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@20dfd799[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36605, datanodeUuid=e7491dd4-eed2-463b-a465-b069e6a1f484, infoPort=44145, infoSecurePort=0, ipcPort=44491, storageInfo=lv=-57;cid=testClusterID;nsid=134593331;c=1733174918669):Failed to transfer BP-510899863-172.17.0.3-1733174918669:blk_1073741833_1019 to 127.0.0.1:34535 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:22,090 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42545%2C1733174921025.1733174962090 2024-12-02T21:29:22,097 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,098 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 
2024-12-02T21:29:22,098 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741889_1073 2024-12-02T21:29:22,099 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:22,101 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,101 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK], DatanodeInfoWithStorage[127.0.0.1:36605,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 
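In the Thread-1031 records above the client keeps being handed pipelines that include 127.0.0.1:33759 and 127.0.0.1:34535, both of which refuse connections, so each block is abandoned and the offending node excluded; the namenode is evidently still offering those datanodes, presumably because it has not yet marked them dead. A quick client-side check of what the namenode currently reports as live is DistributedFileSystem.getDataNodeStats(); a minimal sketch, assuming fs.defaultFS points at this cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    public final class LiveDatanodeReport {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
            // One entry per datanode the namenode currently counts as live.
            for (DatanodeInfo dn : dfs.getDataNodeStats()) {
                System.out.println(dn.getXferAddr());
            }
        }
    }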
2024-12-02T21:29:22,101 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741890_1074 2024-12-02T21:29:22,102 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:22,107 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,107 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,107 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,107 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,107 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,108 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174946223 with entries=12, filesize=11.46 KB; new WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174962090 2024-12-02T21:29:22,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741877_1060 (size=11743) 2024-12-02T21:29:22,110 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44145:44145),(127.0.0.1/127.0.0.1:40977:40977)] 2024-12-02T21:29:22,110 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174946223 is not closed yet, will try archiving it next time 2024-12-02T21:29:22,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42545 {}] regionserver.HRegion(8855): Flush requested on b0c5cfcfb485c924481201897ff11224 2024-12-02T21:29:22,117 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b0c5cfcfb485c924481201897ff11224 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-02T21:29:22,122 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/a2bc1f4fbea34182a3c4513c8225d624 is 1080, key is row0013/info:/1733174962111/Put/seqid=0 2024-12-02T21:29:22,124 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,124 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:22,124 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741892_1076 2024-12-02T21:29:22,124 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:22,126 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,126 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK], DatanodeInfoWithStorage[127.0.0.1:36605,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 
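A few records back, a handler on port 42545 requested a flush of b0c5cfcfb485c924481201897ff11224 and MemStoreFlusher.0 began writing its 7.35 KB memstore to a new .tmp HFile; the pipeline errors in between are that flush writer hitting the same dead datanodes. The equivalent flush can also be requested explicitly through the client Admin API. A minimal sketch, assuming a reachable cluster and the table name visible in the paths above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class ExplicitFlushSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Flushes every memstore of the table to HFiles, the same end state
                // as the MemStoreFlusher-driven flush recorded above.
                admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
            }
        }
    }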
2024-12-02T21:29:22,126 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741893_1077 2024-12-02T21:29:22,127 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:22,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741894_1078 (size=9267) 2024-12-02T21:29:22,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741894_1078 (size=9267) 2024-12-02T21:29:22,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/a2bc1f4fbea34182a3c4513c8225d624 2024-12-02T21:29:22,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/a2bc1f4fbea34182a3c4513c8225d624 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/a2bc1f4fbea34182a3c4513c8225d624 2024-12-02T21:29:22,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/a2bc1f4fbea34182a3c4513c8225d624, entries=4, sequenceid=66, filesize=9.0 K 2024-12-02T21:29:22,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for b0c5cfcfb485c924481201897ff11224 in 32ms, sequenceid=66, compaction requested=false 2024-12-02T21:29:22,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b0c5cfcfb485c924481201897ff11224: 2024-12-02T21:29:22,148 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K 2024-12-02T21:29:22,148 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:29:22,148 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/e3ddf210ab794852b3ee6b7e5ffbb164 because midkey is the same as first or last row 2024-12-02T21:29:22,253 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,253 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-02T21:29:22,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T21:29:22,335 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T21:29:22,335 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:29:22,335 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:29:22,336 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:29:22,336 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T21:29:22,336 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:29:22,336 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=962798992, stopped=false 2024-12-02T21:29:22,336 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87c3fdb6c570,35361,1733174920862 2024-12-02T21:29:22,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:29:22,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33183-0x10197f282340002, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:29:22,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:29:22,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33183-0x10197f282340002, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:22,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:22,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:22,387 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:29:22,388 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
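The call stack just logged is the test's own teardown: AbstractTestLogRolling.tearDown() closes the shared async connection and HBaseTestingUtil.shutdownMiniCluster() then stops HBase, HDFS and ZooKeeper. A minimal sketch of that JUnit 4 shape, assuming the testing utility exposes the usual startMiniCluster()/shutdownMiniCluster() pair; the class and method bodies below are illustrative, not the actual test.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
        private final HBaseTestingUtil util = new HBaseTestingUtil();

        @Before
        public void setUp() throws Exception {
            util.startMiniCluster();     // assumption: cluster started the same way it is torn down
        }

        @After
        public void tearDown() throws Exception {
            util.shutdownMiniCluster();  // the call visible in the stack above
        }

        @Test
        public void clusterIsUp() throws Exception {
            // A real test would roll WALs / kill datanodes here.
        }
    }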
2024-12-02T21:29:22,388 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:29:22,388 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:29:22,388 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '87c3fdb6c570,42545,1733174921025' ***** 2024-12-02T21:29:22,388 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:29:22,388 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T21:29:22,388 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87c3fdb6c570,33183,1733174923193' ***** 2024-12-02T21:29:22,388 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T21:29:22,389 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:29:22,389 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33183-0x10197f282340002, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:29:22,389 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T21:29:22,389 INFO [RS:0;87c3fdb6c570:42545 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:29:22,389 INFO [RS:0;87c3fdb6c570:42545 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:29:22,389 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:29:22,389 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(3091): Received CLOSE for b0c5cfcfb485c924481201897ff11224 2024-12-02T21:29:22,389 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:29:22,390 INFO [RS:1;87c3fdb6c570:33183 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:29:22,390 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T21:29:22,390 INFO [RS:1;87c3fdb6c570:33183 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:29:22,390 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer(959): stopping server 87c3fdb6c570,33183,1733174923193 2024-12-02T21:29:22,390 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(959): stopping server 87c3fdb6c570,42545,1733174921025 2024-12-02T21:29:22,390 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:29:22,390 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:29:22,390 INFO [RS:1;87c3fdb6c570:33183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;87c3fdb6c570:33183. 2024-12-02T21:29:22,390 INFO [RS:0;87c3fdb6c570:42545 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87c3fdb6c570:42545. 
2024-12-02T21:29:22,390 DEBUG [RS:0;87c3fdb6c570:42545 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:29:22,390 DEBUG [RS:1;87c3fdb6c570:33183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:29:22,390 DEBUG [RS:0;87c3fdb6c570:42545 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:29:22,390 DEBUG [RS:1;87c3fdb6c570:33183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:29:22,390 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b0c5cfcfb485c924481201897ff11224, disabling compactions & flushes 2024-12-02T21:29:22,390 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-02T21:29:22,390 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:29:22,390 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer(976): stopping server 87c3fdb6c570,33183,1733174923193; all regions closed. 2024-12-02T21:29:22,390 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:29:22,390 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:29:22,390 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:29:22,390 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. after waiting 0 ms 2024-12-02T21:29:22,390 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:29:22,390 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T21:29:22,391 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing b0c5cfcfb485c924481201897ff11224 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-02T21:29:22,391 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,391 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,391 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,391 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T21:29:22,391 DEBUG [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, b0c5cfcfb485c924481201897ff11224=TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.} 2024-12-02T21:29:22,391 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,391 DEBUG [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b0c5cfcfb485c924481201897ff11224 2024-12-02T21:29:22,391 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:29:22,392 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:29:22,392 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:29:22,392 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:29:22,392 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:29:22,392 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-02T21:29:22,392 ERROR [FSHLog-0-hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db-prefix:87c3fdb6c570,42545,1733174921025.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,393 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,393 WARN [FSHLog-0-hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db-prefix:87c3fdb6c570,42545,1733174921025.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,393 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C42545%2C1733174921025.meta:.meta(num 1733174923003) roll requested 2024-12-02T21:29:22,393 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42545%2C1733174921025.meta.1733174962393.meta 2024-12-02T21:29:22,393 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:22,394 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,394 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 2024-12-02T21:29:22,394 WARN [IPC Server handler 0 on default port 43877 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 has not been closed. Lease recovery is in progress. RecoveryId = 1079 for block blk_1073741836_1015 2024-12-02T21:29:22,395 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 after 1ms 2024-12-02T21:29:22,397 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,397 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741895_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 
2024-12-02T21:29:22,397 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741895_1080 2024-12-02T21:29:22,397 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/70c98d9053354c2698a14f6633724859 is 1080, key is row0016/info:/1733174962118/Put/seqid=0 2024-12-02T21:29:22,398 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:22,399 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,399 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 2024-12-02T21:29:22,399 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741896_1081 2024-12-02T21:29:22,400 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:22,401 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:22,401 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK], DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:22,401 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:58702 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741897_1082] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6]'}, localName='127.0.0.1:35729', datanodeUuid='15fbf574-b6f1-45db-9826-ff7d28c3d9b5', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741897_1082 to mirror 127.0.0.1:34535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:22,401 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741897_1082 2024-12-02T21:29:22,401 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:58702 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741897_1082] {}] datanode.BlockReceiver(316): Block 1073741897 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-02T21:29:22,401 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,401 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741898_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK], DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:22,401 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741898_1083 2024-12-02T21:29:22,401 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:58702 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741897_1082] {}] datanode.DataXceiver(331): 127.0.0.1:35729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58702 dst: /127.0.0.1:35729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:22,402 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:22,402 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:22,412 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741900_1085 (size=13583) 2024-12-02T21:29:22,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741900_1085 (size=13583) 2024-12-02T21:29:22,412 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,412 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,412 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,412 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:22,412 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174962393.meta 2024-12-02T21:29:22,413 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/70c98d9053354c2698a14f6633724859 2024-12-02T21:29:22,413 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,413 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,413 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta 2024-12-02T21:29:22,413 WARN [IPC Server handler 2 on default port 43877 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta has not been closed. Lease recovery is in progress. RecoveryId = 1086 for block blk_1073741834_1010 2024-12-02T21:29:22,414 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta after 1ms 2024-12-02T21:29:22,416 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44145:44145),(127.0.0.1/127.0.0.1:40977:40977)] 2024-12-02T21:29:22,416 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta is not closed yet, will try archiving it next time 2024-12-02T21:29:22,420 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/.tmp/info/70c98d9053354c2698a14f6633724859 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/70c98d9053354c2698a14f6633724859 2024-12-02T21:29:22,425 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/70c98d9053354c2698a14f6633724859, entries=8, sequenceid=77, filesize=13.3 K 2024-12-02T21:29:22,427 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for b0c5cfcfb485c924481201897ff11224 in 37ms, sequenceid=77, compaction requested=true 2024-12-02T21:29:22,427 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d5c584e278cf4798a21dad256288f4e3, 
hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d29dc6d2b80f44eaaf9999f60d118cb0, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/f04ef4c298fa4c1a9a43c6129ae624c6, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/26cd86aca5304a539c66c8180a46d50a, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/7b68887c50f4422b9b1ac4a35a77b003, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d90a8aa7ad2f4f8e9a853af834270137] to archive 2024-12-02T21:29:22,428 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T21:29:22,431 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d5c584e278cf4798a21dad256288f4e3 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d5c584e278cf4798a21dad256288f4e3 2024-12-02T21:29:22,432 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d29dc6d2b80f44eaaf9999f60d118cb0 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d29dc6d2b80f44eaaf9999f60d118cb0 2024-12-02T21:29:22,433 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/f04ef4c298fa4c1a9a43c6129ae624c6 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/f04ef4c298fa4c1a9a43c6129ae624c6 2024-12-02T21:29:22,435 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/26cd86aca5304a539c66c8180a46d50a to 
hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/26cd86aca5304a539c66c8180a46d50a 2024-12-02T21:29:22,436 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/7b68887c50f4422b9b1ac4a35a77b003 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/7b68887c50f4422b9b1ac4a35a77b003 2024-12-02T21:29:22,437 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d90a8aa7ad2f4f8e9a853af834270137 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/info/d90a8aa7ad2f4f8e9a853af834270137 2024-12-02T21:29:22,438 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=87c3fdb6c570:35361 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-02T21:29:22,438 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d5c584e278cf4798a21dad256288f4e3=10347, d29dc6d2b80f44eaaf9999f60d118cb0=12506, f04ef4c298fa4c1a9a43c6129ae624c6=17994, 26cd86aca5304a539c66c8180a46d50a=6027, 7b68887c50f4422b9b1ac4a35a77b003=6027, d90a8aa7ad2f4f8e9a853af834270137=6027] 2024-12-02T21:29:22,441 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/.tmp/info/af654539d65c42d8be88f42fe6d10df1 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224./info:regioninfo/1733174924113/Put/seqid=0 2024-12-02T21:29:22,442 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b0c5cfcfb485c924481201897ff11224/recovered.edits/80.seqid, newMaxSeqId=80, maxSeqId=1 2024-12-02T21:29:22,443 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,443 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK], DatanodeInfoWithStorage[127.0.0.1:36605,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK]) is bad. 2024-12-02T21:29:22,443 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741901_1087 2024-12-02T21:29:22,443 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 
2024-12-02T21:29:22,443 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b0c5cfcfb485c924481201897ff11224: Waiting for close lock at 1733174962390Running coprocessor pre-close hooks at 1733174962390Disabling compacts and flushes for region at 1733174962390Disabling writes for close at 1733174962390Obtaining lock to block concurrent updates at 1733174962391 (+1 ms)Preparing flush snapshotting stores in b0c5cfcfb485c924481201897ff11224 at 1733174962391Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224., syncing WAL and waiting on mvcc, flushsize=dataSize=8607, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1733174962391Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. at 1733174962392 (+1 ms)Flushing b0c5cfcfb485c924481201897ff11224/info: creating writer at 1733174962393 (+1 ms)Flushing b0c5cfcfb485c924481201897ff11224/info: appending metadata at 1733174962397 (+4 ms)Flushing b0c5cfcfb485c924481201897ff11224/info: closing flushed file at 1733174962397Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@608b120a: reopening flushed file at 1733174962419 (+22 ms)Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for b0c5cfcfb485c924481201897ff11224 in 37ms, sequenceid=77, compaction requested=true at 1733174962427 (+8 ms)Writing region close event to WAL at 1733174962438 (+11 ms)Running coprocessor post-close hooks at 1733174962443 (+5 ms)Closed at 1733174962443 2024-12-02T21:29:22,443 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40921,DS-7558115b-79be-4716-9d89-27c17c543e11,DISK] 2024-12-02T21:29:22,443 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733174923324.b0c5cfcfb485c924481201897ff11224. 2024-12-02T21:29:22,444 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,445 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK], DatanodeInfoWithStorage[127.0.0.1:36605,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 
2024-12-02T21:29:22,445 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741902_1088 2024-12-02T21:29:22,445 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:22,447 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33759 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,447 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:52376 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741903_1089] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data4]'}, localName='127.0.0.1:36605', datanodeUuid='e7491dd4-eed2-463b-a465-b069e6a1f484', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741903_1089 to mirror 127.0.0.1:33759 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:22,447 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36605,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK], DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 
2024-12-02T21:29:22,447 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741903_1089 2024-12-02T21:29:22,448 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:52376 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741903_1089] {}] datanode.BlockReceiver(316): Block 1073741903 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T21:29:22,448 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:52376 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741903_1089] {}] datanode.DataXceiver(331): 127.0.0.1:36605:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52376 dst: /127.0.0.1:36605 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:22,448 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:22,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741904_1090 (size=7089) 2024-12-02T21:29:22,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741904_1090 (size=7089) 2024-12-02T21:29:22,455 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/.tmp/info/af654539d65c42d8be88f42fe6d10df1 2024-12-02T21:29:22,475 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/.tmp/ns/aaccb89cfef945109e5c6c7e844a419c is 43, key is default/ns:d/1733174923087/Put/seqid=0 2024-12-02T21:29:22,477 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1091 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,477 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:52392 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741905_1091] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data4]'}, localName='127.0.0.1:36605', datanodeUuid='e7491dd4-eed2-463b-a465-b069e6a1f484', xmitsInProgress=0}:Exception transferring block BP-510899863-172.17.0.3-1733174918669:blk_1073741905_1091 to mirror 127.0.0.1:34535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:22,478 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741905_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36605,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK], DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:22,478 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741905_1091 2024-12-02T21:29:22,478 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:52392 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741905_1091] {}] datanode.BlockReceiver(316): Block 1073741905 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-12-02T21:29:22,478 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_34953894_22 at /127.0.0.1:52392 [Receiving block BP-510899863-172.17.0.3-1733174918669:blk_1073741905_1091] {}] datanode.DataXceiver(331): 127.0.0.1:36605:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52392 dst: /127.0.0.1:36605 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:22,478 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:22,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741906_1092 (size=5153) 2024-12-02T21:29:22,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741906_1092 (size=5153) 2024-12-02T21:29:22,484 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/.tmp/ns/aaccb89cfef945109e5c6c7e844a419c 2024-12-02T21:29:22,506 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/.tmp/table/c82441d78f434dc0b3a94edef7c76ecb is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733174924124/Put/seqid=0 2024-12-02T21:29:22,508 WARN [Thread-1073 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1093 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,509 WARN [Thread-1073 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741907_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK], DatanodeInfoWithStorage[127.0.0.1:36605,DS-6ef154a4-ae5a-4693-8ba7-1de44f10a955,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK]) is bad. 2024-12-02T21:29:22,509 WARN [Thread-1073 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741907_1093 2024-12-02T21:29:22,509 WARN [Thread-1073 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34535,DS-eed8541c-b823-415d-a2f9-9a441128ce5d,DISK] 2024-12-02T21:29:22,510 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.1733174946223 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs/87c3fdb6c570%2C42545%2C1733174921025.1733174946223 2024-12-02T21:29:22,511 WARN [Thread-1073 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741908_1094 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:22,511 WARN [Thread-1073 {}] hdfs.DataStreamer(1731): Error Recovery for BP-510899863-172.17.0.3-1733174918669:blk_1073741908_1094 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK], DatanodeInfoWithStorage[127.0.0.1:35729,DS-74584e30-0164-426a-9a01-0c6fe0d1b09c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK]) is bad. 
2024-12-02T21:29:22,511 WARN [Thread-1073 {}] hdfs.DataStreamer(1850): Abandoning BP-510899863-172.17.0.3-1733174918669:blk_1073741908_1094 2024-12-02T21:29:22,511 WARN [Thread-1073 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33759,DS-aae85e50-de1f-4ae5-9b44-f256bb0439bc,DISK] 2024-12-02T21:29:22,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741909_1095 (size=5424) 2024-12-02T21:29:22,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741909_1095 (size=5424) 2024-12-02T21:29:22,517 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/.tmp/table/c82441d78f434dc0b3a94edef7c76ecb 2024-12-02T21:29:22,524 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/.tmp/info/af654539d65c42d8be88f42fe6d10df1 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/info/af654539d65c42d8be88f42fe6d10df1 2024-12-02T21:29:22,532 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/info/af654539d65c42d8be88f42fe6d10df1, entries=10, sequenceid=11, filesize=6.9 K 2024-12-02T21:29:22,533 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/.tmp/ns/aaccb89cfef945109e5c6c7e844a419c as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/ns/aaccb89cfef945109e5c6c7e844a419c 2024-12-02T21:29:22,541 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/ns/aaccb89cfef945109e5c6c7e844a419c, entries=2, sequenceid=11, filesize=5.0 K 2024-12-02T21:29:22,542 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/.tmp/table/c82441d78f434dc0b3a94edef7c76ecb as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/table/c82441d78f434dc0b3a94edef7c76ecb 2024-12-02T21:29:22,549 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/table/c82441d78f434dc0b3a94edef7c76ecb, entries=2, sequenceid=11, filesize=5.3 K 2024-12-02T21:29:22,550 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 158ms, sequenceid=11, compaction requested=false 2024-12-02T21:29:22,551 INFO [regionserver/87c3fdb6c570:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:29:22,554 INFO [regionserver/87c3fdb6c570:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T21:29:22,554 INFO [regionserver/87c3fdb6c570:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T21:29:22,556 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-02T21:29:22,557 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:29:22,557 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:29:22,557 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733174962391Running coprocessor pre-close hooks at 1733174962391Disabling compacts and flushes for region at 1733174962391Disabling writes for close at 1733174962392 (+1 ms)Obtaining lock to block concurrent updates at 1733174962392Preparing flush snapshotting stores in 1588230740 at 1733174962392Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733174962392Flushing stores of hbase:meta,,1.1588230740 at 1733174962417 (+25 ms)Flushing 1588230740/info: creating writer at 1733174962417Flushing 1588230740/info: appending metadata at 1733174962440 (+23 ms)Flushing 1588230740/info: closing flushed file at 1733174962440Flushing 1588230740/ns: creating writer at 1733174962461 (+21 ms)Flushing 1588230740/ns: appending metadata at 1733174962474 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733174962474Flushing 1588230740/table: creating writer at 1733174962491 (+17 ms)Flushing 1588230740/table: appending metadata at 1733174962506 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733174962506Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ededdd5: reopening flushed file at 1733174962523 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fbcd69: reopening flushed file at 1733174962532 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@109ffaa0: reopening flushed file at 1733174962541 (+9 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 158ms, sequenceid=11, compaction requested=false at 1733174962550 (+9 ms)Writing region close event to WAL at 1733174962552 (+2 ms)Running coprocessor post-close hooks at 1733174962556 (+4 ms)Closed at 1733174962557 (+1 ms) 2024-12-02T21:29:22,557 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T21:29:22,592 INFO [RS:0;87c3fdb6c570:42545 {}] 
regionserver.HRegionServer(976): stopping server 87c3fdb6c570,42545,1733174921025; all regions closed.
2024-12-02T21:29:22,592 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:29:22,592 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:29:22,593 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:29:22,593 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:29:22,593 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:29:22,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741899_1084 (size=825)
2024-12-02T21:29:22,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741899_1084 (size=825)
2024-12-02T21:29:23,119 INFO [master/87c3fdb6c570:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-02T21:29:23,119 INFO [master/87c3fdb6c570:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-02T21:29:23,295 INFO [regionserver/87c3fdb6c570:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-02T21:29:23,296 INFO [regionserver/87c3fdb6c570:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-02T21:29:23,298 INFO [regionserver/87c3fdb6c570:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-02T21:29:24,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741828_1004 (size=1189)
2024-12-02T21:29:24,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741832_1008 (size=32)
2024-12-02T21:29:24,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741877_1060 (size=11743)
2024-12-02T21:29:25,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741826_1002 (size=42)
2024-12-02T21:29:26,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-12-02T21:29:26,072 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-02T21:29:26,073 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-02T21:29:26,396 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 after 4002ms
2024-12-02T21:29:26,415 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta after 4002ms
2024-12-02T21:29:27,393 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-02T21:29:27,395 DEBUG [RS:1;87c3fdb6c570:33183 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs 2024-12-02T21:29:27,395 INFO [RS:1;87c3fdb6c570:33183 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C33183%2C1733174923193:(num 1733174923426) 2024-12-02T21:29:27,395 DEBUG [RS:1;87c3fdb6c570:33183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:29:27,395 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:29:27,396 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:29:27,396 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.ChoreService(370): Chore service for: regionserver/87c3fdb6c570:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T21:29:27,396 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:29:27,396 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T21:29:27,396 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:29:27,396 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:29:27,396 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:29:27,396 INFO [RS:1;87c3fdb6c570:33183 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33183 2024-12-02T21:29:27,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?]
at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-02T21:29:27,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33183-0x10197f282340002, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87c3fdb6c570,33183,1733174923193
2024-12-02T21:29:27,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-02T21:29:27,418 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-02T21:29:27,429 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87c3fdb6c570,33183,1733174923193]
2024-12-02T21:29:27,439 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87c3fdb6c570,33183,1733174923193 already deleted, retry=false
2024-12-02T21:29:27,439 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87c3fdb6c570,33183,1733174923193 expired; onlineServers=1
2024-12-02T21:29:27,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T21:29:27,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T21:29:27,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T21:29:27,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T21:29:27,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T21:29:27,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T21:29:27,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T21:29:27,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T21:29:27,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33183-0x10197f282340002, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T21:29:27,529 INFO [RS:1;87c3fdb6c570:33183 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-02T21:29:27,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33183-0x10197f282340002, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T21:29:27,529 INFO [RS:1;87c3fdb6c570:33183 {}] regionserver.HRegionServer(1031): Exiting; stopping=87c3fdb6c570,33183,1733174923193; zookeeper connection closed.
2024-12-02T21:29:27,529 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7da6fd3f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7da6fd3f
2024-12-02T21:29:27,593 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-02T21:29:27,597 DEBUG [RS:0;87c3fdb6c570:42545 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs
2024-12-02T21:29:27,597 INFO [RS:0;87c3fdb6c570:42545 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C42545%2C1733174921025.meta:.meta(num 1733174962393)
2024-12-02T21:29:27,598 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:29:27,598 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:29:27,598 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:29:27,598 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:29:27,599 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:29:27,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741891_1075 (size=14682)
2024-12-02T21:29:27,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741891_1075 (size=14682)
2024-12-02T21:29:27,605 DEBUG [RS:0;87c3fdb6c570:42545 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs
2024-12-02T21:29:27,605 INFO [RS:0;87c3fdb6c570:42545 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C42545%2C1733174921025:(num 1733174962090)
2024-12-02T21:29:27,605 DEBUG [RS:0;87c3fdb6c570:42545 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T21:29:27,605 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.LeaseManager(133): Closed leases
2024-12-02T21:29:27,606 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-02T21:29:27,606 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.ChoreService(370): Chore service for: regionserver/87c3fdb6c570:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-02T21:29:27,606 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-02T21:29:27,606 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-02T21:29:27,606 INFO [RS:0;87c3fdb6c570:42545 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42545
2024-12-02T21:29:27,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87c3fdb6c570,42545,1733174921025
2024-12-02T21:29:27,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-02T21:29:27,618 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-02T21:29:27,629 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87c3fdb6c570,42545,1733174921025]
2024-12-02T21:29:27,639 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87c3fdb6c570,42545,1733174921025 already deleted, retry=false
2024-12-02T21:29:27,639 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87c3fdb6c570,42545,1733174921025 expired; onlineServers=0
2024-12-02T21:29:27,640 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87c3fdb6c570,35361,1733174920862' *****
2024-12-02T21:29:27,640 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-02T21:29:27,640 INFO [M:0;87c3fdb6c570:35361 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-02T21:29:27,640 INFO [M:0;87c3fdb6c570:35361 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-02T21:29:27,640 DEBUG [M:0;87c3fdb6c570:35361 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-02T21:29:27,641 DEBUG [M:0;87c3fdb6c570:35361 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-02T21:29:27,641 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-02T21:29:27,641 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174922368 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174922368,5,FailOnTimeoutGroup]
2024-12-02T21:29:27,641 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174922368 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174922368,5,FailOnTimeoutGroup]
2024-12-02T21:29:27,641 INFO [M:0;87c3fdb6c570:35361 {}] hbase.ChoreService(370): Chore service for: master/87c3fdb6c570:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-02T21:29:27,641 INFO [M:0;87c3fdb6c570:35361 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-02T21:29:27,642 DEBUG [M:0;87c3fdb6c570:35361 {}] master.HMaster(1795): Stopping service threads
2024-12-02T21:29:27,642 INFO [M:0;87c3fdb6c570:35361 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-02T21:29:27,642 INFO [M:0;87c3fdb6c570:35361 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-02T21:29:27,642 INFO [M:0;87c3fdb6c570:35361 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-02T21:29:27,643 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-02T21:29:27,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-02T21:29:27,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-02T21:29:27,650 DEBUG [M:0;87c3fdb6c570:35361 {}] zookeeper.ZKUtil(347): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-02T21:29:27,650 WARN [M:0;87c3fdb6c570:35361 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-02T21:29:27,652 INFO [M:0;87c3fdb6c570:35361 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/.lastflushedseqids
2024-12-02T21:29:27,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741910_1096 (size=130)
2024-12-02T21:29:27,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741910_1096 (size=130)
2024-12-02T21:29:27,662 INFO [M:0;87c3fdb6c570:35361 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-02T21:29:27,662 INFO [M:0;87c3fdb6c570:35361 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-02T21:29:27,662 DEBUG [M:0;87c3fdb6c570:35361 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-02T21:29:27,662 INFO [M:0;87c3fdb6c570:35361 {}]
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:29:27,662 DEBUG [M:0;87c3fdb6c570:35361 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:29:27,662 DEBUG [M:0;87c3fdb6c570:35361 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:29:27,662 DEBUG [M:0;87c3fdb6c570:35361 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:29:27,663 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-12-02T21:29:27,680 DEBUG [M:0;87c3fdb6c570:35361 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/29ecb80cfc724ed8bb7534dac2003c83 is 82, key is hbase:meta,,1/info:regioninfo/1733174923032/Put/seqid=0 2024-12-02T21:29:27,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741911_1097 (size=5672) 2024-12-02T21:29:27,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741911_1097 (size=5672) 2024-12-02T21:29:27,686 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/29ecb80cfc724ed8bb7534dac2003c83 2024-12-02T21:29:27,704 DEBUG [M:0;87c3fdb6c570:35361 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/777ef71cb2524eec8ebe45c3a9f944b6 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733174924129/Put/seqid=0 2024-12-02T21:29:27,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741912_1098 (size=6256) 2024-12-02T21:29:27,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741912_1098 (size=6256) 2024-12-02T21:29:27,710 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/777ef71cb2524eec8ebe45c3a9f944b6 2024-12-02T21:29:27,715 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 777ef71cb2524eec8ebe45c3a9f944b6 2024-12-02T21:29:27,728 DEBUG [M:0;87c3fdb6c570:35361 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a1222ba734440698046c1897ced1c55 is 69, key is 87c3fdb6c570,33183,1733174923193/rs:state/1733174923271/Put/seqid=0 2024-12-02T21:29:27,729 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:29:27,729 INFO [RS:0;87c3fdb6c570:42545 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:29:27,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42545-0x10197f282340001, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:29:27,729 INFO [RS:0;87c3fdb6c570:42545 {}] regionserver.HRegionServer(1031): Exiting; stopping=87c3fdb6c570,42545,1733174921025; zookeeper connection closed. 2024-12-02T21:29:27,729 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52141cc1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52141cc1 2024-12-02T21:29:27,730 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-02T21:29:27,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741913_1099 (size=5224) 2024-12-02T21:29:27,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741913_1099 (size=5224) 2024-12-02T21:29:27,733 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a1222ba734440698046c1897ced1c55 2024-12-02T21:29:27,752 DEBUG [M:0;87c3fdb6c570:35361 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/83031ed01c874b1cb347ba3bb09b2dd5 is 52, key is load_balancer_on/state:d/1733174923176/Put/seqid=0 2024-12-02T21:29:27,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741914_1100 (size=5056) 2024-12-02T21:29:27,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741914_1100 (size=5056) 2024-12-02T21:29:27,758 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/83031ed01c874b1cb347ba3bb09b2dd5 2024-12-02T21:29:27,764 DEBUG [M:0;87c3fdb6c570:35361 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/29ecb80cfc724ed8bb7534dac2003c83 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/29ecb80cfc724ed8bb7534dac2003c83 2024-12-02T21:29:27,769 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/29ecb80cfc724ed8bb7534dac2003c83, entries=8, sequenceid=60, filesize=5.5 K 2024-12-02T21:29:27,770 DEBUG [M:0;87c3fdb6c570:35361 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/777ef71cb2524eec8ebe45c3a9f944b6 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/777ef71cb2524eec8ebe45c3a9f944b6 2024-12-02T21:29:27,776 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 777ef71cb2524eec8ebe45c3a9f944b6 2024-12-02T21:29:27,776 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/777ef71cb2524eec8ebe45c3a9f944b6, entries=6, sequenceid=60, filesize=6.1 K 2024-12-02T21:29:27,777 DEBUG [M:0;87c3fdb6c570:35361 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a1222ba734440698046c1897ced1c55 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a1222ba734440698046c1897ced1c55 2024-12-02T21:29:27,783 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a1222ba734440698046c1897ced1c55, entries=2, sequenceid=60, filesize=5.1 K 2024-12-02T21:29:27,784 DEBUG [M:0;87c3fdb6c570:35361 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/83031ed01c874b1cb347ba3bb09b2dd5 as hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/83031ed01c874b1cb347ba3bb09b2dd5 2024-12-02T21:29:27,789 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/83031ed01c874b1cb347ba3bb09b2dd5, entries=1, sequenceid=60, filesize=4.9 K 2024-12-02T21:29:27,790 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=60, compaction requested=false 2024-12-02T21:29:27,792 INFO [M:0;87c3fdb6c570:35361 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:29:27,792 DEBUG [M:0;87c3fdb6c570:35361 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733174967662Disabling compacts and flushes for region at 1733174967662Disabling writes for close at 1733174967662Obtaining lock to block concurrent updates at 1733174967663 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733174967663Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1733174967663Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733174967664 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733174967664Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733174967680 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733174967680Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733174967690 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733174967703 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733174967703Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733174967715 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733174967728 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733174967728Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733174967738 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733174967751 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733174967751Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60c6af73: reopening flushed file at 1733174967763 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75b58cc4: reopening flushed file at 1733174967769 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54ff5edc: reopening flushed file at 1733174967776 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60a6cf4d: reopening flushed file at 1733174967783 (+7 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=60, compaction requested=false at 1733174967790 (+7 ms)Writing region close event to WAL at 1733174967792 (+2 ms)Closed at 1733174967792 2024-12-02T21:29:27,792 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:27,792 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:27,792 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:27,792 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:27,793 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:27,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741888_1071 (size=1045) 2024-12-02T21:29:27,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36605 is added to blk_1073741888_1071 (size=1045) 2024-12-02T21:29:27,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741835_1011 (size=393) 2024-12-02T21:29:27,809 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741837_1013 (size=76) 2024-12-02T21:29:27,975 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:29:27,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:27,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:27,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:27,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:27,989 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:27,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:27,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:27,994 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:28,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:28,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:29:28,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:29:28,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35729 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:29:28,833 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@39833a96 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-510899863-172.17.0.3-1733174918669:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:40921,null,null]) java.net.ConnectException: Call From 87c3fdb6c570/172.17.0.3 to localhost:39023 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-02T21:29:29,402 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/WALs/87c3fdb6c570,35361,1733174920862/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/oldWALs/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 2024-12-02T21:29:29,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:29:29,411 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/MasterData/oldWALs/87c3fdb6c570%2C35361%2C1733174920862.1733174921169 to hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/oldWALs/87c3fdb6c570%2C35361%2C1733174920862.1733174921169$masterlocalwal$ 2024-12-02T21:29:29,411 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T21:29:29,411 INFO [M:0;87c3fdb6c570:35361 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T21:29:29,411 INFO [M:0;87c3fdb6c570:35361 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35361 2024-12-02T21:29:29,411 INFO [M:0;87c3fdb6c570:35361 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:29:29,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:29:29,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:29:29,561 INFO [M:0;87c3fdb6c570:35361 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:29:29,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35361-0x10197f282340000, quorum=127.0.0.1:59541, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:29:29,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@122a196d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:29,569 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a528add{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:29:29,569 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:29:29,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a8eeeb7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:29:29,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d6a3337{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,STOPPED} 2024-12-02T21:29:29,573 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T21:29:29,573 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:29:29,573 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:29:29,573 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-510899863-172.17.0.3-1733174918669 (Datanode Uuid e7491dd4-eed2-463b-a465-b069e6a1f484) service to localhost/127.0.0.1:43877 2024-12-02T21:29:29,573 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@41b29604 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:40921,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:39023 , LocalHost:localPort 87c3fdb6c570/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-02T21:29:29,573 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@41b29604 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:36605,null,null]) java.io.IOException: No block pool offer service for bpid=BP-510899863-172.17.0.3-1733174918669 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:29,574 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data3/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:29,574 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@41b29604 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:40921,null,null], DatanodeInfoWithStorage[127.0.0.1:36605,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-510899863-172.17.0.3-1733174918669:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:40921,null,null], DatanodeInfoWithStorage[127.0.0.1:36605,null,null]] 2024-12-02T21:29:29,574 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data4/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:29,574 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:29:29,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3fd17220{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:29,576 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c68f920{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:29:29,576 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:29:29,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@2ab5b96c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:29:29,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30008f24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,STOPPED} 2024-12-02T21:29:29,577 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:29:29,577 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T21:29:29,577 WARN [BP-510899863-172.17.0.3-1733174918669 heartbeating to localhost/127.0.0.1:43877 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-510899863-172.17.0.3-1733174918669 (Datanode Uuid 15fbf574-b6f1-45db-9826-ff7d28c3d9b5) service to localhost/127.0.0.1:43877 2024-12-02T21:29:29,578 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:29:29,578 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data5/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:29,578 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/cluster_bab0ab38-f779-7e90-b200-acf6b935e670/data/data6/current/BP-510899863-172.17.0.3-1733174918669 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:29,578 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:29:29,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7982676d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:29:29,583 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2efbdc75{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:29:29,583 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:29:29,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d790455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:29:29,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3150e6db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir/,STOPPED} 2024-12-02T21:29:29,590 INFO [Time-limited test {}] 
zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:29:29,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T21:29:29,630 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 83) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43877 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43877 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43877 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43877 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:43877 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43877 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f25acbef840.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43877 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43877 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:44183 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43877 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43877 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44183 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f25acbef840.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:43877 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=448 (was 405) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=130 (was 259), ProcessCount=11 (was 11), AvailableMemoryMB=6830 (was 2898) - AvailableMemoryMB LEAK? - 2024-12-02T21:29:29,637 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=130, ProcessCount=11, AvailableMemoryMB=6830 2024-12-02T21:29:29,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:29:29,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.log.dir so I do NOT create it in target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870 2024-12-02T21:29:29,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a93b426b-86c4-b561-0363-27bfe4a2ca33/hadoop.tmp.dir so I do NOT create it in target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870 2024-12-02T21:29:29,637 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b, deleteOnExit=true 2024-12-02T21:29:29,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T21:29:29,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/test.cache.data in system properties and HBase conf 2024-12-02T21:29:29,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:29:29,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T21:29:29,638 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:29:29,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:29:29,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:29:29,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:29:29,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:29:29,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:29:29,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:29:29,650 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:29:30,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:29:30,037 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:29:30,039 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:29:30,039 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:29:30,039 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:29:30,039 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:29:30,040 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60017892{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:29:30,040 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e0e18a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:29:30,129 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@57204301{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/java.io.tmpdir/jetty-localhost-44551-hadoop-hdfs-3_4_1-tests_jar-_-any-13443104076439834002/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:29:30,130 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@131b09a7{HTTP/1.1, (http/1.1)}{localhost:44551} 2024-12-02T21:29:30,130 INFO [Time-limited test {}] server.Server(415): Started @160122ms 2024-12-02T21:29:30,140 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:29:30,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:30,411 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:29:30,413 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:29:30,414 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:29:30,414 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:29:30,414 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:29:30,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e8ebafe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:29:30,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7353ad08{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:29:30,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:30,504 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@89ebf29{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/java.io.tmpdir/jetty-localhost-43415-hadoop-hdfs-3_4_1-tests_jar-_-any-16968674070765484766/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:30,504 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a4f8c69{HTTP/1.1, (http/1.1)}{localhost:43415} 2024-12-02T21:29:30,504 INFO [Time-limited test {}] server.Server(415): Started @160497ms 2024-12-02T21:29:30,506 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:29:30,530 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:29:30,533 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:29:30,534 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:29:30,534 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:29:30,534 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:29:30,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16a8f4c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:29:30,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b27dfb0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:29:30,624 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c9b16c7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/java.io.tmpdir/jetty-localhost-36993-hadoop-hdfs-3_4_1-tests_jar-_-any-13055554346784142767/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:30,624 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25cefabc{HTTP/1.1, (http/1.1)}{localhost:36993} 2024-12-02T21:29:30,624 INFO [Time-limited test {}] server.Server(415): Started @160616ms 2024-12-02T21:29:30,626 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
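The repeated Close-WAL-Writer-0 warnings above come from HBase polling HDFS to find out whether the lease on the old WAL file has been released; because the DFSClient backing those paths has already been shut down, every poll fails with "Filesystem closed" until the writer gives up. A minimal sketch of that polling pattern is shown below, using only public DistributedFileSystem calls; the method name, timeout and sleep values are invented for illustration, and this is not the actual RecoverLeaseFSUtils implementation.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      // Hypothetical helper, loosely modelled on the retry loop behind the
      // RecoverLeaseFSUtils warnings above; structure and values are illustrative only.
      static boolean recoverLease(DistributedFileSystem dfs, Path wal) throws InterruptedException {
        long deadline = System.currentTimeMillis() + 900_000L;   // made-up 15-minute budget
        while (System.currentTimeMillis() < deadline) {
          try {
            if (dfs.recoverLease(wal)) {     // ask the NameNode to close the file now
              return true;
            }
            if (dfs.isFileClosed(wal)) {     // the call that threw in the log above
              return true;
            }
          } catch (IOException e) {
            // "java.io.IOException: Filesystem closed" lands here when the DFSClient
            // behind `dfs` was already shut down; the real code logs and keeps retrying.
          }
          Thread.sleep(1_000L);              // back off before the next poll
        }
        return false;                        // lease never confirmed recovered
      }
    }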
2024-12-02T21:29:31,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:31,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:31,700 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data1/current/BP-725672116-172.17.0.3-1733174969661/current, will proceed with Du for space computation calculation, 2024-12-02T21:29:31,701 WARN [Thread-1214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data2/current/BP-725672116-172.17.0.3-1733174969661/current, will proceed with Du for space computation calculation, 2024-12-02T21:29:31,716 WARN [Thread-1177 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:29:31,718 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42a21d8f63c7c6b6 with lease ID 0xa7fed51b47c58643: Processing first storage report for DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3 from datanode DatanodeRegistration(127.0.0.1:43783, datanodeUuid=522a7cfd-5a57-46ca-91ce-4908e9afdae4, infoPort=40397, infoSecurePort=0, ipcPort=36523, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661) 2024-12-02T21:29:31,718 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42a21d8f63c7c6b6 with lease ID 0xa7fed51b47c58643: from storage DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3 node DatanodeRegistration(127.0.0.1:43783, datanodeUuid=522a7cfd-5a57-46ca-91ce-4908e9afdae4, infoPort=40397, infoSecurePort=0, ipcPort=36523, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:31,719 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42a21d8f63c7c6b6 with lease ID 0xa7fed51b47c58643: Processing first storage report for DS-a198ace5-6920-49b6-9225-03b59d60790f from datanode DatanodeRegistration(127.0.0.1:43783, datanodeUuid=522a7cfd-5a57-46ca-91ce-4908e9afdae4, infoPort=40397, infoSecurePort=0, ipcPort=36523, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661) 2024-12-02T21:29:31,719 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42a21d8f63c7c6b6 with lease ID 0xa7fed51b47c58643: from storage DS-a198ace5-6920-49b6-9225-03b59d60790f node DatanodeRegistration(127.0.0.1:43783, datanodeUuid=522a7cfd-5a57-46ca-91ce-4908e9afdae4, infoPort=40397, infoSecurePort=0, ipcPort=36523, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:31,826 WARN [Thread-1224 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data3/current/BP-725672116-172.17.0.3-1733174969661/current, will proceed with Du for space computation calculation, 2024-12-02T21:29:31,827 WARN [Thread-1225 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data4/current/BP-725672116-172.17.0.3-1733174969661/current, will proceed with Du for space computation calculation, 2024-12-02T21:29:31,845 WARN [Thread-1200 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:29:31,848 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9743a0ba801342e2 with lease ID 0xa7fed51b47c58644: Processing first storage report for DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a from datanode DatanodeRegistration(127.0.0.1:35055, datanodeUuid=471ad00b-f454-4e19-9fa8-4a8c791495a3, infoPort=43747, infoSecurePort=0, ipcPort=38419, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661) 2024-12-02T21:29:31,848 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9743a0ba801342e2 with lease ID 0xa7fed51b47c58644: from storage DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a node DatanodeRegistration(127.0.0.1:35055, datanodeUuid=471ad00b-f454-4e19-9fa8-4a8c791495a3, infoPort=43747, infoSecurePort=0, ipcPort=38419, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:31,848 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9743a0ba801342e2 with lease ID 0xa7fed51b47c58644: Processing first storage report for DS-ff02a3f6-cbb2-4239-b3f9-b23027de1920 from datanode DatanodeRegistration(127.0.0.1:35055, datanodeUuid=471ad00b-f454-4e19-9fa8-4a8c791495a3, infoPort=43747, infoSecurePort=0, ipcPort=38419, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661) 2024-12-02T21:29:31,848 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9743a0ba801342e2 with lease ID 0xa7fed51b47c58644: from storage DS-ff02a3f6-cbb2-4239-b3f9-b23027de1920 node DatanodeRegistration(127.0.0.1:35055, datanodeUuid=471ad00b-f454-4e19-9fa8-4a8c791495a3, infoPort=43747, infoSecurePort=0, ipcPort=38419, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:31,860 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870 2024-12-02T21:29:31,862 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/zookeeper_0, clientPort=51794, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:29:31,863 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51794 2024-12-02T21:29:31,863 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:29:31,865 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:29:31,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:29:31,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43783 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:29:31,875 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a with version=8 2024-12-02T21:29:31,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/hbase-staging 2024-12-02T21:29:31,878 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:29:31,878 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:29:31,878 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:29:31,878 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:29:31,878 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:29:31,878 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:29:31,878 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T21:29:31,878 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:29:31,879 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35035 2024-12-02T21:29:31,880 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35035 connecting to ZooKeeper ensemble=127.0.0.1:51794 2024-12-02T21:29:32,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:350350x0, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:29:32,057 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35035-0x10197f349810000 connected 2024-12-02T21:29:32,166 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:29:32,170 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:29:32,174 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:29:32,175 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a, hbase.cluster.distributed=false 2024-12-02T21:29:32,178 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:29:32,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35035 2024-12-02T21:29:32,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35035 2024-12-02T21:29:32,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35035 2024-12-02T21:29:32,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35035 2024-12-02T21:29:32,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35035 2024-12-02T21:29:32,192 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:29:32,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:29:32,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:29:32,192 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:29:32,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:29:32,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:29:32,192 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:29:32,193 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:29:32,193 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42773 2024-12-02T21:29:32,195 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42773 connecting to ZooKeeper ensemble=127.0.0.1:51794 2024-12-02T21:29:32,195 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:29:32,197 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:29:32,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:427730x0, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:29:32,208 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42773-0x10197f349810001 connected 2024-12-02T21:29:32,208 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:29:32,208 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:29:32,208 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:29:32,209 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:29:32,210 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:29:32,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42773 2024-12-02T21:29:32,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42773 2024-12-02T21:29:32,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42773 2024-12-02T21:29:32,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42773 2024-12-02T21:29:32,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42773 2024-12-02T21:29:32,224 DEBUG [M:0;87c3fdb6c570:35035 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87c3fdb6c570:35035 2024-12-02T21:29:32,225 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87c3fdb6c570,35035,1733174971877 2024-12-02T21:29:32,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:29:32,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:29:32,238 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87c3fdb6c570,35035,1733174971877 2024-12-02T21:29:32,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:29:32,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:32,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:32,251 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:29:32,251 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87c3fdb6c570,35035,1733174971877 from backup master directory 2024-12-02T21:29:32,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87c3fdb6c570,35035,1733174971877 2024-12-02T21:29:32,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:29:32,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:29:32,260 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
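The ZKWatcher traffic above ("Set watcher on znode that does not yet exist", followed by NodeCreated/NodeDeleted/NodeChildrenChanged events) is HBase's wrapper around the standard ZooKeeper watch mechanism: calling exists() arms a one-shot watch even when the node is absent. A stripped-down sketch with the plain ZooKeeper client follows; the ensemble address is the one printed in the log, but the class is illustrative and not HBase's ZKWatcher/ZKUtil code.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public final class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        // Session-level watcher: fires for connection state changes and, once armed via
        // exists()/getChildren(), for NodeCreated / NodeDeleted / NodeChildrenChanged.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51794", 30_000,
            (WatchedEvent event) ->
                System.out.println("ZK event: " + event.getType() + " path=" + event.getPath()));

        // exists() returns null while /hbase/master has not been created yet, but the
        // watch is still registered -- this is what "Set watcher on znode that does not
        // yet exist" means; a NodeCreated event arrives once the active master registers.
        Stat stat = zk.exists("/hbase/master", true);
        System.out.println("/hbase/master currently " + (stat == null ? "absent" : "present"));
      }
    }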
2024-12-02T21:29:32,260 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87c3fdb6c570,35035,1733174971877 2024-12-02T21:29:32,270 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/hbase.id] with ID: efb3c4a6-e72a-45e8-9a5b-d2da259bd8a9 2024-12-02T21:29:32,271 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/.tmp/hbase.id 2024-12-02T21:29:32,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43783 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:29:32,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:29:32,277 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/.tmp/hbase.id]:[hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/hbase.id] 2024-12-02T21:29:32,289 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:29:32,289 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T21:29:32,290 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
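The cluster ID lines above show the write-to-temp-then-rename pattern: the ID is written under .tmp first and only then moved to hbase.id, so a concurrent reader never observes a partially written file. A generic sketch of that publish-by-rename idiom with the Hadoop FileSystem API follows; the root directory is a placeholder and this is not the FSUtils source.

    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdPublishSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder namenode URI and root dir; in the log these come from the mini cluster.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:45281"), new Configuration());
        Path rootDir = new Path("/user/jenkins/test-data/example-root");
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");

        // 1) Write the ID somewhere readers do not look.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("efb3c4a6-e72a-45e8-9a5b-d2da259bd8a9".getBytes(StandardCharsets.UTF_8));
        }
        // 2) Rename into the final location; on HDFS this is a metadata-only operation,
        //    so a reader sees either no file or the complete file, never a partial one.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
        }
      }
    }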
2024-12-02T21:29:32,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:32,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:32,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:29:32,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43783 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:29:32,309 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:29:32,310 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T21:29:32,310 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:29:32,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43783 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:29:32,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:29:32,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:32,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:32,721 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store 2024-12-02T21:29:32,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43783 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:29:32,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:29:32,733 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:29:32,733 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:29:32,733 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:29:32,733 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
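The 'master:store' descriptor printed in the entries above lists per-family settings (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE, IN_MEMORY). A minimal sketch of how an equivalent descriptor could be expressed with the public HBase client builders, using the 'info' and 'proc' families as examples (the 'rs' and 'state' families follow the same pattern as 'proc'); the class name MasterStoreDescriptorSketch is illustrative, the attribute values are taken from the log, and the real 'master:store' region is created internally by the master rather than through client code, so builder method names should be checked against the client version in use.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info' family as logged: 3 versions, ROWCOL bloom, ROW_INDEX_V1 encoding,
        // 8 KB blocks, kept in memory.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .setInMemory(true)
            .build();
        // 'proc' family as logged: 1 version, ROW bloom, 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }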
2024-12-02T21:29:32,733 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:29:32,733 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:29:32,733 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:29:32,733 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733174972733Disabling compacts and flushes for region at 1733174972733Disabling writes for close at 1733174972733Writing region close event to WAL at 1733174972733Closed at 1733174972733 2024-12-02T21:29:32,734 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/.initializing 2024-12-02T21:29:32,734 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877 2024-12-02T21:29:32,737 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C35035%2C1733174971877, suffix=, logDir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877, archiveDir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/oldWALs, maxLogs=10 2024-12-02T21:29:32,737 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C35035%2C1733174971877.1733174972737 2024-12-02T21:29:32,742 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877/87c3fdb6c570%2C35035%2C1733174971877.1733174972737 2024-12-02T21:29:32,743 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40397:40397),(127.0.0.1/127.0.0.1:43747:43747)] 2024-12-02T21:29:32,744 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:29:32,744 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:29:32,744 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:29:32,745 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:29:32,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:29:32,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:29:32,748 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:32,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:29:32,749 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:29:32,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:29:32,750 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:32,751 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:29:32,751 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:29:32,752 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:29:32,753 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:32,753 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:29:32,753 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:29:32,755 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:29:32,755 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:32,756 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:29:32,756 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:29:32,756 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 
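The CompactionConfiguration lines above echo the effective compaction settings per store (minimum and maximum files to compact, ratio, off-peak ratio, minimum compact size). A small sketch, assuming the commonly documented configuration keys, of how those same values would be set explicitly; the class name is illustrative and key names and defaults should be verified against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Matches "files [minFilesToCompact:3, maxFilesToCompact:10)" in the log.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Matches "ratio 1.200000; off-peak ratio 5.000000".
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // Matches "minCompactSize:128 MB".
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        return conf;
      }
    }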
2024-12-02T21:29:32,757 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:29:32,758 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:29:32,758 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:29:32,759 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:29:32,759 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:29:32,761 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:29:32,762 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804479, jitterRate=0.022947967052459717}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:29:32,762 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733174972745Initializing all the Stores at 1733174972746 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174972746Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174972746Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174972746Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174972746Cleaning up temporary data from old regions at 
1733174972758 (+12 ms)Region opened successfully at 1733174972762 (+4 ms) 2024-12-02T21:29:32,763 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:29:32,766 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e7c9c56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:29:32,767 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T21:29:32,767 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:29:32,767 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:29:32,767 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:29:32,767 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T21:29:32,768 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T21:29:32,768 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:29:32,770 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
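The FlushLargeStoresPolicy entry above reports flushSizeLowerBound=33554432 because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set: the region memstore flush size (134217728 bytes, i.e. 128 MB, as shown in the flusher lines) is divided by the number of column families (four in 'master:store'), giving 32 MB per family. A small sketch of just that fallback arithmetic; it is the calculation only, not the actual HBase policy class.

    public final class FlushLowerBoundSketch {
      /**
       * Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound
       * is unset: divide the region memstore flush size evenly per family.
       */
      static long perFamilyLowerBound(long memstoreFlushSizeBytes, int familyCount) {
        return memstoreFlushSizeBytes / familyCount;
      }

      public static void main(String[] args) {
        // 134217728 / 4 = 33554432 (32 MB), matching flushSizeLowerBound in the log.
        System.out.println(perFamilyLowerBound(134_217_728L, 4));
      }
    }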
2024-12-02T21:29:32,770 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:29:32,818 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:29:32,818 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:29:32,819 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:29:32,828 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:29:32,829 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:29:32,830 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:29:32,839 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:29:32,841 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:29:32,849 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:29:32,853 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:29:32,860 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:29:32,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:29:32,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:29:32,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:32,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-02T21:29:32,871 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87c3fdb6c570,35035,1733174971877, sessionid=0x10197f349810000, setting cluster-up flag (Was=false) 2024-12-02T21:29:32,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:32,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:32,923 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:29:32,924 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,35035,1733174971877 2024-12-02T21:29:32,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:32,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:32,975 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:29:32,977 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,35035,1733174971877 2024-12-02T21:29:32,978 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T21:29:32,980 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T21:29:32,980 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T21:29:32,980 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
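The ZKUtil and RecoverableZooKeeper entries above only report that optional znodes such as /hbase/balancer and /hbase/switch/split do not exist yet, which is expected on a fresh cluster. A minimal sketch, using the plain Apache ZooKeeper client rather than HBase's ZKUtil wrapper, of reading such a znode and treating "no node" as a non-error; the quorum address is the one shown in the log, and the class name is illustrative.

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public final class ZnodeProbeSketch {
      public static void main(String[] args) throws Exception {
        Watcher noop = (WatchedEvent e) -> { /* HBase logs these via ZKWatcher */ };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51794", 30_000, noop);
        try {
          byte[] data = zk.getData("/hbase/balancer", false, null);
          System.out.println("balancer znode bytes: " + data.length);
        } catch (KeeperException.NoNodeException expected) {
          // Mirrors "Unable to get data of znode ... (not necessarily an error)".
          System.out.println("/hbase/balancer does not exist yet");
        } finally {
          zk.close();
        }
      }
    }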
2024-12-02T21:29:32,981 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87c3fdb6c570,35035,1733174971877 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:29:32,982 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:29:32,982 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:29:32,982 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:29:32,982 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:29:32,982 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87c3fdb6c570:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:29:32,983 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:32,983 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:29:32,983 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:32,984 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733175002984 2024-12-02T21:29:32,984 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:29:32,984 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:29:32,984 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:29:32,984 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:29:32,984 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:29:32,984 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:29:32,984 INFO 
[master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:29:32,985 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:29:32,985 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:32,985 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:29:32,985 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:29:32,985 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:29:32,985 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:29:32,985 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:29:32,986 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174972985,5,FailOnTimeoutGroup] 2024-12-02T21:29:32,986 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:32,986 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174972986,5,FailOnTimeoutGroup] 2024-12-02T21:29:32,986 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:32,986 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:29:32,986 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:32,986 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
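The ChoreService entries above register periodic chores (LogsCleaner every 600000 ms, SnapshotCleaner every 1800000 ms, and so on). A short sketch of the same scheduling pattern with a plain java.util.concurrent scheduler, as an analogue of HBase's ChoreService/ScheduledChore rather than a use of those classes; the task bodies are placeholders.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public final class ChoreAnalogueSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
        // Analogue of "ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS".
        chores.scheduleAtFixedRate(
            () -> System.out.println("cleaning old WALs (placeholder task)"),
            0, 600_000, TimeUnit.MILLISECONDS);
        // Analogue of "ScheduledChore name=SnapshotCleaner, period=1800000".
        chores.scheduleAtFixedRate(
            () -> System.out.println("cleaning expired snapshots (placeholder task)"),
            0, 1_800_000, TimeUnit.MILLISECONDS);
      }
    }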
2024-12-02T21:29:32,986 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:29:32,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:29:32,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43783 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:29:32,993 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T21:29:32,993 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a 2024-12-02T21:29:33,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:29:33,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43783 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:29:33,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:29:33,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:29:33,004 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:29:33,004 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:33,005 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:29:33,005 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:29:33,006 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:29:33,006 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:33,007 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:29:33,007 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:29:33,008 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:29:33,008 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:33,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:29:33,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:29:33,010 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:29:33,010 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:33,010 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:29:33,010 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:29:33,011 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740 2024-12-02T21:29:33,011 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740 2024-12-02T21:29:33,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:29:33,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:29:33,013 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:29:33,014 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(746): ClusterId : efb3c4a6-e72a-45e8-9a5b-d2da259bd8a9 2024-12-02T21:29:33,014 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:29:33,014 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:29:33,016 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:29:33,016 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856321, jitterRate=0.08886958658695221}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:29:33,017 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733174973002Initializing all the Stores at 1733174973003 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174973003Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174973003Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174973003Instantiating store for column 
family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174973003Cleaning up temporary data from old regions at 1733174973012 (+9 ms)Region opened successfully at 1733174973017 (+5 ms) 2024-12-02T21:29:33,017 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:29:33,017 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:29:33,017 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:29:33,017 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:29:33,017 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:29:33,017 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:29:33,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733174973017Disabling compacts and flushes for region at 1733174973017Disabling writes for close at 1733174973017Writing region close event to WAL at 1733174973017Closed at 1733174973017 2024-12-02T21:29:33,018 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:29:33,019 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:29:33,019 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:29:33,019 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T21:29:33,019 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:29:33,020 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:29:33,022 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:29:33,029 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:29:33,029 DEBUG [RS:0;87c3fdb6c570:42773 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38a27cee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:29:33,046 DEBUG [RS:0;87c3fdb6c570:42773 {}] 
regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87c3fdb6c570:42773 2024-12-02T21:29:33,046 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T21:29:33,046 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T21:29:33,046 DEBUG [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T21:29:33,047 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(2659): reportForDuty to master=87c3fdb6c570,35035,1733174971877 with port=42773, startcode=1733174972192 2024-12-02T21:29:33,047 DEBUG [RS:0;87c3fdb6c570:42773 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:29:33,049 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39865, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:29:33,049 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35035 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:33,049 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35035 {}] master.ServerManager(517): Registering regionserver=87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:33,051 DEBUG [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a 2024-12-02T21:29:33,051 DEBUG [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45281 2024-12-02T21:29:33,051 DEBUG [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T21:29:33,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:29:33,060 DEBUG [RS:0;87c3fdb6c570:42773 {}] zookeeper.ZKUtil(111): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:33,060 WARN [RS:0;87c3fdb6c570:42773 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
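The reportForDuty exchange above shows the region server registering with the master and receiving cluster configuration (hbase.rootdir, fs.defaultFS). For a test client talking to this mini-cluster, the only coordinates needed are the ZooKeeper quorum and client port visible in the log; a minimal sketch follows, with listTableNames() used as an arbitrary smoke test and the class name illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port as reported by ZKWatcher in this log.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "51794");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          for (TableName table : admin.listTableNames()) {
            System.out.println("table: " + table);
          }
        }
      }
    }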
2024-12-02T21:29:33,060 INFO [RS:0;87c3fdb6c570:42773 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:29:33,061 DEBUG [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:33,061 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87c3fdb6c570,42773,1733174972192] 2024-12-02T21:29:33,065 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:29:33,067 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:29:33,067 INFO [RS:0;87c3fdb6c570:42773 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:29:33,067 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,067 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T21:29:33,068 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T21:29:33,068 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,068 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:33,068 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:33,068 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:33,068 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:33,068 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:33,068 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:29:33,068 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:33,068 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:33,068 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87c3fdb6c570:0, corePoolSize=1, 
maxPoolSize=1 2024-12-02T21:29:33,069 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:33,069 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:33,069 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:29:33,069 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:29:33,069 DEBUG [RS:0;87c3fdb6c570:42773 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:29:33,069 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,069 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,069 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,069 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,069 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,069 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,42773,1733174972192-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:29:33,084 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:29:33,084 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,42773,1733174972192-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,084 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,084 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.Replication(171): 87c3fdb6c570,42773,1733174972192 started 2024-12-02T21:29:33,096 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:29:33,096 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(1482): Serving as 87c3fdb6c570,42773,1733174972192, RpcServer on 87c3fdb6c570/172.17.0.3:42773, sessionid=0x10197f349810001 2024-12-02T21:29:33,096 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:29:33,096 DEBUG [RS:0;87c3fdb6c570:42773 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:33,096 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,42773,1733174972192' 2024-12-02T21:29:33,096 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:29:33,097 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:29:33,098 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:29:33,098 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:29:33,098 DEBUG [RS:0;87c3fdb6c570:42773 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:33,098 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,42773,1733174972192' 2024-12-02T21:29:33,098 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:29:33,098 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:29:33,099 DEBUG [RS:0;87c3fdb6c570:42773 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:29:33,099 INFO [RS:0;87c3fdb6c570:42773 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:29:33,099 INFO [RS:0;87c3fdb6c570:42773 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T21:29:33,172 WARN [87c3fdb6c570:35035 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-02T21:29:33,204 INFO [RS:0;87c3fdb6c570:42773 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C42773%2C1733174972192, suffix=, logDir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192, archiveDir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/oldWALs, maxLogs=32 2024-12-02T21:29:33,206 INFO [RS:0;87c3fdb6c570:42773 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42773%2C1733174972192.1733174973205 2024-12-02T21:29:33,214 INFO [RS:0;87c3fdb6c570:42773 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 2024-12-02T21:29:33,215 DEBUG [RS:0;87c3fdb6c570:42773 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43747:43747),(127.0.0.1/127.0.0.1:40397:40397)] 2024-12-02T21:29:33,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:29:33,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:29:33,422 DEBUG [87c3fdb6c570:35035 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:29:33,424 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:33,427 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,42773,1733174972192, state=OPENING 2024-12-02T21:29:33,439 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:29:33,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:33,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:33,525 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:29:33,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,42773,1733174972192}] 2024-12-02T21:29:33,525 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:29:33,525 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:29:33,684 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:29:33,689 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33269, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:29:33,695 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T21:29:33,695 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:29:33,698 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C42773%2C1733174972192.meta, suffix=.meta, logDir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192, archiveDir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/oldWALs, maxLogs=32 2024-12-02T21:29:33,699 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42773%2C1733174972192.meta.1733174973698.meta 2024-12-02T21:29:33,707 INFO 
[RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.meta.1733174973698.meta 2024-12-02T21:29:33,708 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40397:40397),(127.0.0.1/127.0.0.1:43747:43747)] 2024-12-02T21:29:33,711 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:29:33,711 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:29:33,711 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:29:33,712 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T21:29:33,712 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:29:33,712 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:29:33,712 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T21:29:33,712 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T21:29:33,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:29:33,716 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:29:33,716 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:33,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:29:33,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:29:33,718 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:29:33,718 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:33,718 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:29:33,719 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:29:33,719 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:29:33,720 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:33,720 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:29:33,720 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:29:33,721 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:29:33,721 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:33,722 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:29:33,722 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:29:33,722 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740 2024-12-02T21:29:33,724 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740 2024-12-02T21:29:33,725 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:29:33,725 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:29:33,725 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-02T21:29:33,727 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:29:33,727 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=771212, jitterRate=-0.01935437321662903}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:29:33,727 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T21:29:33,728 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733174973712Writing region info on filesystem at 1733174973712Initializing all the Stores at 1733174973714 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174973714Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174973714Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174973714Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733174973714Cleaning up temporary data from old regions at 1733174973725 (+11 ms)Running coprocessor post-open hooks at 1733174973727 (+2 ms)Region opened successfully at 1733174973728 (+1 ms) 2024-12-02T21:29:33,729 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733174973683 2024-12-02T21:29:33,731 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:29:33,731 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T21:29:33,732 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:33,733 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,42773,1733174972192, state=OPEN 2024-12-02T21:29:33,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:29:33,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:29:33,775 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:33,775 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:29:33,775 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:29:33,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:29:33,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,42773,1733174972192 in 250 msec 2024-12-02T21:29:33,780 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:29:33,780 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 759 msec 2024-12-02T21:29:33,781 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:29:33,781 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T21:29:33,782 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:29:33,783 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,42773,1733174972192, seqNum=-1] 2024-12-02T21:29:33,783 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:29:33,784 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50467, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:29:33,790 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 810 msec 2024-12-02T21:29:33,790 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733174973790, completionTime=-1 2024-12-02T21:29:33,790 INFO 
[master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:29:33,790 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T21:29:33,792 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T21:29:33,792 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733175033792 2024-12-02T21:29:33,792 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733175093792 2024-12-02T21:29:33,792 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-02T21:29:33,792 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35035,1733174971877-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,792 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35035,1733174971877-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,792 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35035,1733174971877-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,792 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87c3fdb6c570:35035, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,792 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,793 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,794 DEBUG [master/87c3fdb6c570:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T21:29:33,797 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.536sec 2024-12-02T21:29:33,797 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:29:33,797 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:29:33,797 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:29:33,797 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-02T21:29:33,797 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:29:33,797 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35035,1733174971877-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:29:33,797 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35035,1733174971877-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:29:33,800 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:29:33,800 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:29:33,800 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,35035,1733174971877-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:29:33,815 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@156f820b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:29:33,815 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87c3fdb6c570,35035,-1 for getting cluster id 2024-12-02T21:29:33,815 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T21:29:33,816 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'efb3c4a6-e72a-45e8-9a5b-d2da259bd8a9' 2024-12-02T21:29:33,817 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T21:29:33,817 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "efb3c4a6-e72a-45e8-9a5b-d2da259bd8a9" 2024-12-02T21:29:33,817 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f52015e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:29:33,817 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87c3fdb6c570,35035,-1] 2024-12-02T21:29:33,817 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T21:29:33,818 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:29:33,819 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39202, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T21:29:33,820 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@542bdd12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:29:33,820 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:29:33,821 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,42773,1733174972192, seqNum=-1] 2024-12-02T21:29:33,822 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:29:33,823 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36934, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:29:33,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87c3fdb6c570,35035,1733174971877 2024-12-02T21:29:33,826 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:29:33,828 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T21:29:33,828 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-02T21:29:33,828 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-02T21:29:33,828 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T21:29:33,829 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 87c3fdb6c570,35035,1733174971877 2024-12-02T21:29:33,830 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@14c86ab7 2024-12-02T21:29:33,830 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T21:29:33,831 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39212, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T21:29:33,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35035 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T21:29:33,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35035 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-02T21:29:33,832 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35035 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:29:33,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35035 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-02T21:29:33,835 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:29:33,835 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:33,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35035 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-02T21:29:33,836 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:29:33,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35035 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T21:29:33,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741835_1011 (size=395) 2024-12-02T21:29:33,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43783 is added to blk_1073741835_1011 (size=395) 2024-12-02T21:29:33,845 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3308cd961618a6484ed76991aef8535b, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a 2024-12-02T21:29:33,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43783 is added to blk_1073741836_1012 (size=78) 2024-12-02T21:29:33,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741836_1012 (size=78) 2024-12-02T21:29:33,852 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:29:33,853 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 3308cd961618a6484ed76991aef8535b, disabling compactions & flushes 2024-12-02T21:29:33,853 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:33,853 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:33,853 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. after waiting 0 ms 2024-12-02T21:29:33,853 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:33,853 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:33,853 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3308cd961618a6484ed76991aef8535b: Waiting for close lock at 1733174973852Disabling compacts and flushes for region at 1733174973853 (+1 ms)Disabling writes for close at 1733174973853Writing region close event to WAL at 1733174973853Closed at 1733174973853 2024-12-02T21:29:33,855 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:29:33,855 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733174973855"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733174973855"}]},"ts":"1733174973855"} 2024-12-02T21:29:33,858 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-02T21:29:33,859 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:29:33,860 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733174973860"}]},"ts":"1733174973860"} 2024-12-02T21:29:33,863 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-02T21:29:33,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3308cd961618a6484ed76991aef8535b, ASSIGN}] 2024-12-02T21:29:33,865 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3308cd961618a6484ed76991aef8535b, ASSIGN 2024-12-02T21:29:33,866 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3308cd961618a6484ed76991aef8535b, ASSIGN; state=OFFLINE, location=87c3fdb6c570,42773,1733174972192; forceNewPlan=false, retain=false 2024-12-02T21:29:34,017 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3308cd961618a6484ed76991aef8535b, regionState=OPENING, regionLocation=87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:34,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3308cd961618a6484ed76991aef8535b, ASSIGN because future has completed 2024-12-02T21:29:34,023 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3308cd961618a6484ed76991aef8535b, server=87c3fdb6c570,42773,1733174972192}] 2024-12-02T21:29:34,187 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 
2024-12-02T21:29:34,188 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3308cd961618a6484ed76991aef8535b, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:29:34,188 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:34,189 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:29:34,189 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:34,189 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:34,192 INFO [StoreOpener-3308cd961618a6484ed76991aef8535b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:34,193 INFO [StoreOpener-3308cd961618a6484ed76991aef8535b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3308cd961618a6484ed76991aef8535b columnFamilyName info 2024-12-02T21:29:34,193 DEBUG [StoreOpener-3308cd961618a6484ed76991aef8535b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:29:34,194 INFO [StoreOpener-3308cd961618a6484ed76991aef8535b-1 {}] regionserver.HStore(327): Store=3308cd961618a6484ed76991aef8535b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:29:34,194 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:34,195 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/default/TestLogRolling-testLogRollOnPipelineRestart/3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:34,195 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/default/TestLogRolling-testLogRollOnPipelineRestart/3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:34,195 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:34,195 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:34,197 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:34,199 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/default/TestLogRolling-testLogRollOnPipelineRestart/3308cd961618a6484ed76991aef8535b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:29:34,200 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3308cd961618a6484ed76991aef8535b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799456, jitterRate=0.016562119126319885}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:29:34,200 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:34,200 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3308cd961618a6484ed76991aef8535b: Running coprocessor pre-open hook at 1733174974189Writing region info on filesystem at 1733174974189Initializing all the Stores at 1733174974191 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733174974191Cleaning up temporary data from old regions at 1733174974195 (+4 ms)Running coprocessor post-open hooks at 1733174974200 (+5 ms)Region opened successfully at 1733174974200 2024-12-02T21:29:34,201 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b., pid=6, masterSystemTime=1733174974177 2024-12-02T21:29:34,204 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:34,204 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:34,205 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3308cd961618a6484ed76991aef8535b, regionState=OPEN, openSeqNum=2, regionLocation=87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:34,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3308cd961618a6484ed76991aef8535b, server=87c3fdb6c570,42773,1733174972192 because future has completed 2024-12-02T21:29:34,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T21:29:34,212 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3308cd961618a6484ed76991aef8535b, server=87c3fdb6c570,42773,1733174972192 in 186 msec 2024-12-02T21:29:34,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T21:29:34,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3308cd961618a6484ed76991aef8535b, ASSIGN in 348 msec 2024-12-02T21:29:34,215 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:29:34,215 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733174974215"}]},"ts":"1733174974215"} 2024-12-02T21:29:34,217 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-02T21:29:34,218 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:29:34,220 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 386 msec 2024-12-02T21:29:34,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:34,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:35,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:35,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:36,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T21:29:36,071 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T21:29:36,072 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-02T21:29:36,073 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-02T21:29:36,073 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:29:36,073 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T21:29:36,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:36,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:37,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:37,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:38,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:38,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:38,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:38,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:38,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:38,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:38,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:38,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:38,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:38,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:38,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:38,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:39,236 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:29:39,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:39,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:39,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:39,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:39,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:39,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:39,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:39,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:39,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:39,259 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:29:39,264 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T21:29:39,264 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-02T21:29:39,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:39,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:40,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:40,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:41,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:41,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:42,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:42,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:43,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:43,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:29:43,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35035 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T21:29:43,942 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-02T21:29:43,943 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-02T21:29:43,946 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-02T21:29:43,946 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:43,950 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b., hostname=87c3fdb6c570,42773,1733174972192, seqNum=2] 2024-12-02T21:29:44,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:44,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:45,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:45,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:45,953 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 2024-12-02T21:29:45,955 WARN [ResponseProcessor for block BP-725672116-172.17.0.3-1733174969661:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-725672116-172.17.0.3-1733174969661:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-725672116-172.17.0.3-1733174969661:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:35055,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:45,955 WARN [ResponseProcessor for block BP-725672116-172.17.0.3-1733174969661:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-725672116-172.17.0.3-1733174969661:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-725672116-172.17.0.3-1733174969661:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:35055,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:45,955 WARN [ResponseProcessor for block BP-725672116-172.17.0.3-1733174969661:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-725672116-172.17.0.3-1733174969661:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:45,957 WARN [DataStreamer for file /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.meta.1733174973698.meta block BP-725672116-172.17.0.3-1733174969661:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-725672116-172.17.0.3-1733174969661:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK], DatanodeInfoWithStorage[127.0.0.1:35055,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35055,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK]) is bad. 
2024-12-02T21:29:45,957 WARN [DataStreamer for file /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 block BP-725672116-172.17.0.3-1733174969661:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-725672116-172.17.0.3-1733174969661:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35055,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK], DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35055,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK]) is bad. 2024-12-02T21:29:45,957 WARN [DataStreamer for file /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877/87c3fdb6c570%2C35035%2C1733174971877.1733174972737 block BP-725672116-172.17.0.3-1733174969661:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-725672116-172.17.0.3-1733174969661:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK], DatanodeInfoWithStorage[127.0.0.1:35055,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35055,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK]) is bad. 2024-12-02T21:29:45,957 WARN [PacketResponder: BP-725672116-172.17.0.3-1733174969661:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35055] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:45,957 WARN [PacketResponder: BP-725672116-172.17.0.3-1733174969661:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35055] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:45,958 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_547691960_22 at /127.0.0.1:54450 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54450 dst: /127.0.0.1:43783 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:45,959 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_578743991_22 at /127.0.0.1:54500 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54500 dst: /127.0.0.1:43783 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:45,958 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_578743991_22 at /127.0.0.1:57584 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35055:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57584 dst: /127.0.0.1:35055 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:45,959 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_578743991_22 at /127.0.0.1:57588 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35055:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57588 dst: /127.0.0.1:35055 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:45,959 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_547691960_22 at /127.0.0.1:57542 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35055:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57542 dst: /127.0.0.1:35055 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:45,959 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_578743991_22 at /127.0.0.1:54488 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54488 dst: /127.0.0.1:43783 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:46,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c9b16c7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:46,119 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25cefabc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:29:46,119 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:29:46,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b27dfb0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:29:46,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16a8f4c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,STOPPED} 2024-12-02T21:29:46,121 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:29:46,121 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-725672116-172.17.0.3-1733174969661 (Datanode Uuid 471ad00b-f454-4e19-9fa8-4a8c791495a3) service to localhost/127.0.0.1:45281 2024-12-02T21:29:46,122 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data3/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:46,122 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data4/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:46,123 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:29:46,123 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:29:46,123 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:29:46,134 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:29:46,138 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:29:46,139 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:29:46,139 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:29:46,139 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:29:46,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59e94e8a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:29:46,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e979747{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:29:46,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59462733{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/java.io.tmpdir/jetty-localhost-42685-hadoop-hdfs-3_4_1-tests_jar-_-any-7612981853078139652/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:46,228 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f8a0d0d{HTTP/1.1, (http/1.1)}{localhost:42685} 2024-12-02T21:29:46,228 INFO [Time-limited test {}] server.Server(415): Started @176220ms 2024-12-02T21:29:46,229 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:29:46,245 WARN [ResponseProcessor for block BP-725672116-172.17.0.3-1733174969661:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-725672116-172.17.0.3-1733174969661:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:46,245 WARN [ResponseProcessor for block BP-725672116-172.17.0.3-1733174969661:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-725672116-172.17.0.3-1733174969661:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:46,245 WARN [ResponseProcessor for block BP-725672116-172.17.0.3-1733174969661:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-725672116-172.17.0.3-1733174969661:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:46,245 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_578743991_22 at /127.0.0.1:44208 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44208 dst: /127.0.0.1:43783 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:46,245 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_578743991_22 at /127.0.0.1:44214 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44214 dst: /127.0.0.1:43783 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:46,245 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_547691960_22 at /127.0.0.1:44226 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44226 dst: /127.0.0.1:43783 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:46,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@89ebf29{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:46,249 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a4f8c69{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:29:46,249 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:29:46,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7353ad08{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:29:46,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e8ebafe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,STOPPED} 2024-12-02T21:29:46,251 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:29:46,251 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:29:46,251 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:29:46,251 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-725672116-172.17.0.3-1733174969661 (Datanode Uuid 522a7cfd-5a57-46ca-91ce-4908e9afdae4) service to localhost/127.0.0.1:45281 2024-12-02T21:29:46,251 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data1/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:46,251 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data2/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:46,252 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:29:46,257 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:29:46,260 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:29:46,260 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:29:46,260 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:29:46,260 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:29:46,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a40ff76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:29:46,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4022a798{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:29:46,349 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a362569{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/java.io.tmpdir/jetty-localhost-42703-hadoop-hdfs-3_4_1-tests_jar-_-any-10451899684180116762/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:46,349 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@49f94f8{HTTP/1.1, 
(http/1.1)}{localhost:42703} 2024-12-02T21:29:46,349 INFO [Time-limited test {}] server.Server(415): Started @176341ms 2024-12-02T21:29:46,350 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:29:46,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:46,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:46,783 WARN [Thread-1348 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:29:46,785 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6bc79299cc5fa122 with lease ID 0xa7fed51b47c58645: from storage DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a node DatanodeRegistration(127.0.0.1:39627, datanodeUuid=471ad00b-f454-4e19-9fa8-4a8c791495a3, infoPort=39819, infoSecurePort=0, ipcPort=45445, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:46,785 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6bc79299cc5fa122 with lease ID 0xa7fed51b47c58645: from storage DS-ff02a3f6-cbb2-4239-b3f9-b23027de1920 node DatanodeRegistration(127.0.0.1:39627, datanodeUuid=471ad00b-f454-4e19-9fa8-4a8c791495a3, infoPort=39819, infoSecurePort=0, ipcPort=45445, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:46,924 WARN [Thread-1368 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:29:46,927 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45cc251ff6a002b2 with lease ID 0xa7fed51b47c58646: from storage DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3 node DatanodeRegistration(127.0.0.1:42401, datanodeUuid=522a7cfd-5a57-46ca-91ce-4908e9afdae4, infoPort=33001, infoSecurePort=0, ipcPort=46403, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:46,927 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45cc251ff6a002b2 with lease ID 0xa7fed51b47c58646: from storage DS-a198ace5-6920-49b6-9225-03b59d60790f node DatanodeRegistration(127.0.0.1:42401, datanodeUuid=522a7cfd-5a57-46ca-91ce-4908e9afdae4, infoPort=33001, infoSecurePort=0, ipcPort=46403, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:47,367 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-02T21:29:47,378 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-02T21:29:47,380 ERROR [FSHLog-0-hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a-prefix:87c3fdb6c570,42773,1733174972192 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:47,380 WARN [FSHLog-0-hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a-prefix:87c3fdb6c570,42773,1733174972192 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:47,380 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C42773%2C1733174972192:(num 1733174973205) roll requested 2024-12-02T21:29:47,381 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42773%2C1733174972192.1733174987380 2024-12-02T21:29:47,385 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 newFile=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 2024-12-02T21:29:47,386 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:47,386 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:47,386 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:47,386 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:47,386 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:47,386 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 2024-12-02T21:29:47,387 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:47,387 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
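Note: the "preLogRoll: oldFile=... newFile=..." debug line comes from a listener the test registers on the WAL so it can observe each roll. A minimal sketch of that pattern, assuming HBase's WALActionsListener callback interface and WAL.registerWALActionsListener behave as on current branches; the println bodies are illustrative only:

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.wal.WAL;

public final class RollTracingSketch {
  /** Sketch: observe every WAL roll the way the test's listener above does. */
  public static void traceRolls(WAL wal) {
    wal.registerWALActionsListener(new WALActionsListener() {
      @Override
      public void preLogRoll(Path oldPath, Path newPath) {
        // Fired just before the roll; oldPath is the WAL being retired.
        System.out.println("preLogRoll: oldFile=" + oldPath + " newFile=" + newPath);
      }

      @Override
      public void postLogRoll(Path oldPath, Path newPath) {
        // Fired once the new writer is in place.
        System.out.println("postLogRoll: oldFile=" + oldPath + " newFile=" + newPath);
      }
    });
  }
}
```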
2024-12-02T21:29:47,387 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 2024-12-02T21:29:47,387 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33001:33001),(127.0.0.1/127.0.0.1:39819:39819)] 2024-12-02T21:29:47,387 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 is not closed yet, will try archiving it next time 2024-12-02T21:29:47,387 WARN [IPC Server handler 1 on default port 45281 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-12-02T21:29:47,387 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 after 0ms 2024-12-02T21:29:47,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:47,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:48,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:48,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:48,785 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-02T21:29:49,393 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-02T21:29:49,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:49,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:50,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:50,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:51,389 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 after 4002ms 2024-12-02T21:29:51,401 WARN [ResponseProcessor for block BP-725672116-172.17.0.3-1733174969661:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-725672116-172.17.0.3-1733174969661:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:51,402 WARN [DataStreamer for file /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 block BP-725672116-172.17.0.3-1733174969661:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-725672116-172.17.0.3-1733174969661:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42401,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK], DatanodeInfoWithStorage[127.0.0.1:39627,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42401,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]) is bad. 2024-12-02T21:29:51,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_578743991_22 at /127.0.0.1:48414 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42401:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48414 dst: /127.0.0.1:42401 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:51,404 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_578743991_22 at /127.0.0.1:50118 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50118 dst: /127.0.0.1:39627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
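Note: the DataXceiver errors (ClosedChannelException, Premature EOF) and the stopped Jetty contexts that follow are the fault injection itself: TestLogRolling bounces the mini cluster's datanodes while a WAL block is open for write. A rough sketch of that pattern, assuming HDFS's MiniDFSCluster test API (getDataNodes, stopDataNode, restartDataNode, waitActive); the stop-all-then-restart-all ordering below is illustrative, not the exact sequence of calls in the test:

```java
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

public final class DataNodeBounceSketch {
  /**
   * Sketch: stop every datanode, then bring them all back and wait for the
   * cluster to settle. In-flight WAL writers see their pipelines fail, which
   * is what produces the DataXceiver errors and the roll shown above.
   */
  public static void bounceDataNodes(MiniDFSCluster cluster) throws Exception {
    List<DataNodeProperties> stopped = new ArrayList<>();
    int count = cluster.getDataNodes().size();
    for (int i = 0; i < count; i++) {
      stopped.add(cluster.stopDataNode(0)); // the live list shrinks as nodes stop
    }
    for (DataNodeProperties dn : stopped) {
      cluster.restartDataNode(dn);
    }
    cluster.waitActive();
  }
}
```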
2024-12-02T21:29:51,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a362569{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:51,408 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@49f94f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:29:51,408 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:29:51,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4022a798{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:29:51,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a40ff76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,STOPPED} 2024-12-02T21:29:51,409 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:29:51,409 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T21:29:51,409 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-725672116-172.17.0.3-1733174969661 (Datanode Uuid 522a7cfd-5a57-46ca-91ce-4908e9afdae4) service to localhost/127.0.0.1:45281 2024-12-02T21:29:51,409 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:29:51,410 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data1/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:51,410 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data2/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:51,410 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:29:51,426 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:29:51,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:29:51,428 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:29:51,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:29:51,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:29:51,429 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:29:51,429 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61aceb4d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:29:51,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21d94b42{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:29:51,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:29:51,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@424d5648{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/java.io.tmpdir/jetty-localhost-42163-hadoop-hdfs-3_4_1-tests_jar-_-any-15986544660962976725/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:51,518 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a0cdfff{HTTP/1.1, (http/1.1)}{localhost:42163} 2024-12-02T21:29:51,518 INFO [Time-limited test {}] server.Server(415): Started @181510ms 2024-12-02T21:29:51,519 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:29:51,533 WARN [ResponseProcessor for block BP-725672116-172.17.0.3-1733174969661:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-725672116-172.17.0.3-1733174969661:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:51,534 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_578743991_22 at /127.0.0.1:50136 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50136 dst: /127.0.0.1:39627 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:51,535 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59462733{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:51,535 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f8a0d0d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:29:51,535 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:29:51,536 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e979747{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:29:51,536 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59e94e8a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,STOPPED} 2024-12-02T21:29:51,536 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:29:51,536 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-725672116-172.17.0.3-1733174969661 (Datanode Uuid 471ad00b-f454-4e19-9fa8-4a8c791495a3) service to localhost/127.0.0.1:45281 2024-12-02T21:29:51,536 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:29:51,537 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:29:51,537 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data3/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:51,537 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data4/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:29:51,537 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:29:51,548 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:29:51,553 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:29:51,556 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:29:51,556 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:29:51,556 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:29:51,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20da775a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:29:51,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e79e191{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:29:51,648 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75d9c3d8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/java.io.tmpdir/jetty-localhost-35139-hadoop-hdfs-3_4_1-tests_jar-_-any-16824460356983021604/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:29:51,648 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21ffcd24{HTTP/1.1, (http/1.1)}{localhost:35139} 2024-12-02T21:29:51,648 INFO [Time-limited test {}] server.Server(415): Started @181640ms 2024-12-02T21:29:51,650 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-02T21:29:52,018 WARN [Thread-1422 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:29:52,020 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd5f76eda132b37 with lease ID 0xa7fed51b47c58647: from storage DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3 node DatanodeRegistration(127.0.0.1:40743, datanodeUuid=522a7cfd-5a57-46ca-91ce-4908e9afdae4, infoPort=46689, infoSecurePort=0, ipcPort=42795, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:52,020 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd5f76eda132b37 with lease ID 0xa7fed51b47c58647: from storage DS-a198ace5-6920-49b6-9225-03b59d60790f node DatanodeRegistration(127.0.0.1:40743, datanodeUuid=522a7cfd-5a57-46ca-91ce-4908e9afdae4, infoPort=46689, infoSecurePort=0, ipcPort=42795, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:52,122 WARN [Thread-1442 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:29:52,124 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3f691ebf5556a99 with lease ID 0xa7fed51b47c58648: from storage DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a node DatanodeRegistration(127.0.0.1:38805, datanodeUuid=471ad00b-f454-4e19-9fa8-4a8c791495a3, infoPort=42675, infoSecurePort=0, ipcPort=35117, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:52,124 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3f691ebf5556a99 with lease ID 0xa7fed51b47c58648: from storage DS-ff02a3f6-cbb2-4239-b3f9-b23027de1920 node DatanodeRegistration(127.0.0.1:38805, datanodeUuid=471ad00b-f454-4e19-9fa8-4a8c791495a3, infoPort=42675, infoSecurePort=0, ipcPort=35117, storageInfo=lv=-57;cid=testClusterID;nsid=1607175291;c=1733174969661), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:29:52,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:52,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:52,669 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-02T21:29:52,675 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-02T21:29:52,677 ERROR [FSHLog-0-hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a-prefix:87c3fdb6c570,42773,1733174972192 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39627,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:52,677 WARN [FSHLog-0-hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a-prefix:87c3fdb6c570,42773,1733174972192 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39627,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
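Note: the endless "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings all point at WALs under hdfs://localhost:43877, apparently left over from an earlier test whose filesystem has already been closed; that Close-WAL-Writer-0 thread keeps retrying lease recovery through a DistributedFileSystem whose DFSClient is shut down, so every reflective isFileClosed call fails identically, whereas the recovery that matters for this test (port 45281) does succeed above ("Recovered lease, attempt=1 ... after 4002ms"). A minimal sketch of the recover-then-poll pattern RecoverLeaseFSUtils implements, assuming direct access to DistributedFileSystem (the real helper reaches isFileClosed via reflection because it is handed a generic FileSystem); the retry bound and sleep are illustrative:

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /**
   * Sketch: ask the NameNode to release the lease on an un-closed WAL file and
   * poll isFileClosed() until it really is closed. If the DistributedFileSystem
   * instance was already closed, every call throws "Filesystem closed", which
   * is the loop the repeated warnings above are stuck in.
   */
  public static boolean recoverWalLease(DistributedFileSystem dfs, Path wal)
      throws Exception {
    for (int attempt = 0; attempt < 60; attempt++) { // illustrative bound
      if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
        return true; // lease released and file closed
      }
      Thread.sleep(1000L); // illustrative pause between attempts
    }
    return false;
  }
}
```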
2024-12-02T21:29:52,677 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C42773%2C1733174972192:(num 1733174987380) roll requested 2024-12-02T21:29:52,678 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42773%2C1733174972192.1733174992677 2024-12-02T21:29:52,685 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 newFile=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174992677 2024-12-02T21:29:52,685 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:52,686 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:52,686 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:52,686 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:52,686 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:52,686 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174992677 2024-12-02T21:29:52,687 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39627,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:52,687 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39627,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:52,687 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 2024-12-02T21:29:52,687 WARN [IPC Server handler 2 on default port 45281 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-02T21:29:52,688 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 after 1ms 2024-12-02T21:29:52,688 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42675:42675),(127.0.0.1/127.0.0.1:46689:46689)] 2024-12-02T21:29:52,688 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 is not closed yet, will try archiving it next time 2024-12-02T21:29:53,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:53,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:54,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:54,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:54,690 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42773%2C1733174972192.1733174994690 2024-12-02T21:29:54,704 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174992677 newFile=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 2024-12-02T21:29:54,704 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:54,704 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:54,705 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:54,705 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:54,705 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:54,705 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174992677 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 2024-12-02T21:29:54,707 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46689:46689),(127.0.0.1/127.0.0.1:42675:42675)] 2024-12-02T21:29:54,707 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 is not closed yet, will try archiving it next time 2024-12-02T21:29:54,707 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174992677 is not closed yet, will try archiving it next time 2024-12-02T21:29:54,707 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 2024-12-02T21:29:54,707 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 2024-12-02T21:29:54,708 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 after 1ms 2024-12-02T21:29:54,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741838_1019 (size=1264) 2024-12-02T21:29:54,708 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 2024-12-02T21:29:54,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741838_1019 (size=1264) 2024-12-02T21:29:54,709 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 is not closed yet, will try archiving it next time 2024-12-02T21:29:54,721 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733174974200/Put/vlen=218/seqid=0] 2024-12-02T21:29:54,722 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733174983951/Put/vlen=1045/seqid=0] 2024-12-02T21:29:54,722 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174973205 2024-12-02T21:29:54,722 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 2024-12-02T21:29:54,722 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 2024-12-02T21:29:54,722 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 after 0ms 2024-12-02T21:29:54,722 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 2024-12-02T21:29:54,725 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733174987380/Put/vlen=1045/seqid=0] 2024-12-02T21:29:54,725 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733174989396/Put/vlen=1045/seqid=0] 2024-12-02T21:29:54,725 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 2024-12-02T21:29:54,725 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174992677 2024-12-02T21:29:54,725 INFO [Time-limited test {}] 
util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174992677 2024-12-02T21:29:54,726 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174992677 after 1ms 2024-12-02T21:29:54,726 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174992677 2024-12-02T21:29:54,729 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733174992677/Put/vlen=1045/seqid=0] 2024-12-02T21:29:54,729 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 2024-12-02T21:29:54,729 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 2024-12-02T21:29:54,730 WARN [IPC Server handler 4 on default port 45281 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-02T21:29:54,730 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 after 1ms 2024-12-02T21:29:55,021 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-02T21:29:55,133 WARN [ResponseProcessor for block BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:55,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_547691960_22 at /127.0.0.1:48052 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40743:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48052 dst: /127.0.0.1:40743 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:40743 remote=/127.0.0.1:48052]. Total timeout mills is 60000, 59570 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:29:55,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_547691960_22 at /127.0.0.1:51022 [Receiving block BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:38805:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51022 dst: /127.0.0.1:38805 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:29:55,133 WARN [DataStreamer for file /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 block BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40743,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK], DatanodeInfoWithStorage[127.0.0.1:38805,DS-82fcc02c-d52f-4c4e-9b78-38365e425a2a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40743,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]) is bad. 
2024-12-02T21:29:55,134 WARN [DataStreamer for file /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 block BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:55,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741839_1022 (size=85) 2024-12-02T21:29:55,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:55,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:56,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:56,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:56,690 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174987380 after 4003ms 2024-12-02T21:29:57,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:57,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:58,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:58,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:58,732 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 after 4003ms 2024-12-02T21:29:58,732 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 2024-12-02T21:29:58,743 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 2024-12-02T21:29:58,744 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-02T21:29:58,745 ERROR [FSHLog-0-hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a-prefix:87c3fdb6c570,42773,1733174972192.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:58,745 WARN [FSHLog-0-hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a-prefix:87c3fdb6c570,42773,1733174972192.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:58,745 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C42773%2C1733174972192.meta:.meta(num 1733174973698) roll requested 2024-12-02T21:29:58,745 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42773%2C1733174972192.meta.1733174998745.meta 2024-12-02T21:29:58,752 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:58,753 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:58,753 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:58,753 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:58,753 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:58,753 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.meta.1733174973698.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.meta.1733174998745.meta 2024-12-02T21:29:58,754 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:58,754 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:58,754 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.meta.1733174973698.meta 2024-12-02T21:29:58,754 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42675:42675),(127.0.0.1/127.0.0.1:46689:46689)] 2024-12-02T21:29:58,755 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.meta.1733174973698.meta is not closed yet, will try archiving it next time 2024-12-02T21:29:58,755 WARN [IPC Server handler 3 on default port 45281 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.meta.1733174973698.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1013 2024-12-02T21:29:58,755 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.meta.1733174973698.meta after 1ms 2024-12-02T21:29:58,772 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/.tmp/info/a9e39ac47bf147968ff7184809b0a0a8 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b./info:regioninfo/1733174974205/Put/seqid=0 2024-12-02T21:29:58,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741841_1025 (size=7125) 2024-12-02T21:29:58,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741841_1025 (size=7125) 2024-12-02T21:29:58,777 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/.tmp/info/a9e39ac47bf147968ff7184809b0a0a8 2024-12-02T21:29:58,795 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/.tmp/ns/5c1534cf7a96415eb32cc227a844ba41 is 43, key is default/ns:d/1733174973785/Put/seqid=0 2024-12-02T21:29:58,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40743 is added to blk_1073741842_1026 (size=5153) 2024-12-02T21:29:58,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741842_1026 (size=5153) 2024-12-02T21:29:58,801 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/.tmp/ns/5c1534cf7a96415eb32cc227a844ba41 2024-12-02T21:29:58,817 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/.tmp/table/404a6c9a79f04e30b149387efa4e3be4 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733174974215/Put/seqid=0 2024-12-02T21:29:58,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741843_1027 (size=5438) 2024-12-02T21:29:58,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741843_1027 (size=5438) 2024-12-02T21:29:58,823 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/.tmp/table/404a6c9a79f04e30b149387efa4e3be4 2024-12-02T21:29:58,828 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/.tmp/info/a9e39ac47bf147968ff7184809b0a0a8 as hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/info/a9e39ac47bf147968ff7184809b0a0a8 2024-12-02T21:29:58,834 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/info/a9e39ac47bf147968ff7184809b0a0a8, entries=10, sequenceid=11, filesize=7.0 K 2024-12-02T21:29:58,835 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/.tmp/ns/5c1534cf7a96415eb32cc227a844ba41 as hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/ns/5c1534cf7a96415eb32cc227a844ba41 2024-12-02T21:29:58,840 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/ns/5c1534cf7a96415eb32cc227a844ba41, entries=2, sequenceid=11, filesize=5.0 K 2024-12-02T21:29:58,841 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/.tmp/table/404a6c9a79f04e30b149387efa4e3be4 as hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/table/404a6c9a79f04e30b149387efa4e3be4 2024-12-02T21:29:58,847 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/table/404a6c9a79f04e30b149387efa4e3be4, entries=2, sequenceid=11, filesize=5.3 K 2024-12-02T21:29:58,848 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=11, compaction requested=false 2024-12-02T21:29:58,848 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-02T21:29:58,849 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3308cd961618a6484ed76991aef8535b 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-02T21:29:58,849 ERROR [FSHLog-0-hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a-prefix:87c3fdb6c570,42773,1733174972192 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:58,849 WARN [FSHLog-0-hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a-prefix:87c3fdb6c570,42773,1733174972192 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:58,850 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C42773%2C1733174972192:(num 1733174994690) roll requested 2024-12-02T21:29:58,850 INFO [regionserver/87c3fdb6c570:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C42773%2C1733174972192.1733174998850 2024-12-02T21:29:58,855 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 newFile=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174998850 2024-12-02T21:29:58,855 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:58,855 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:58,855 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:58,855 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:58,855 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:58,855 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174998850 2024-12-02T21:29:58,855 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:29:58,856 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-725672116-172.17.0.3-1733174969661:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:29:58,856 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 2024-12-02T21:29:58,856 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 after 0ms 2024-12-02T21:29:58,857 DEBUG [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42675:42675),(127.0.0.1/127.0.0.1:46689:46689)] 2024-12-02T21:29:58,857 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 to hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/oldWALs/87c3fdb6c570%2C42773%2C1733174972192.1733174994690 2024-12-02T21:29:58,869 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/default/TestLogRolling-testLogRollOnPipelineRestart/3308cd961618a6484ed76991aef8535b/.tmp/info/c1591f2233d845be913a3c089a582850 is 1080, key is row1002/info:/1733174983951/Put/seqid=0 2024-12-02T21:29:58,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741845_1029 (size=9270) 2024-12-02T21:29:58,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741845_1029 (size=9270) 2024-12-02T21:29:58,874 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/default/TestLogRolling-testLogRollOnPipelineRestart/3308cd961618a6484ed76991aef8535b/.tmp/info/c1591f2233d845be913a3c089a582850 2024-12-02T21:29:58,880 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/default/TestLogRolling-testLogRollOnPipelineRestart/3308cd961618a6484ed76991aef8535b/.tmp/info/c1591f2233d845be913a3c089a582850 as hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/default/TestLogRolling-testLogRollOnPipelineRestart/3308cd961618a6484ed76991aef8535b/info/c1591f2233d845be913a3c089a582850 2024-12-02T21:29:58,885 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/default/TestLogRolling-testLogRollOnPipelineRestart/3308cd961618a6484ed76991aef8535b/info/c1591f2233d845be913a3c089a582850, entries=4, sequenceid=8, filesize=9.1 K 2024-12-02T21:29:58,886 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 3308cd961618a6484ed76991aef8535b in 38ms, sequenceid=8, compaction requested=false 2024-12-02T21:29:58,886 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
3308cd961618a6484ed76991aef8535b: 2024-12-02T21:29:58,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T21:29:58,891 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T21:29:58,891 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:29:58,891 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:29:58,891 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-12-02T21:29:58,891 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T21:29:58,891 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:29:58,891 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=954507330, stopped=false 2024-12-02T21:29:58,892 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87c3fdb6c570,35035,1733174971877 2024-12-02T21:29:58,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:29:58,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:29:58,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:58,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:29:58,942 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:29:58,943 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T21:29:58,943 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:29:58,943 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-12-02T21:29:58,943 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:29:58,944 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:29:58,944 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87c3fdb6c570,42773,1733174972192' ***** 2024-12-02T21:29:58,944 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T21:29:58,945 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:29:58,945 INFO [RS:0;87c3fdb6c570:42773 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:29:58,945 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T21:29:58,945 INFO [RS:0;87c3fdb6c570:42773 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:29:58,945 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(3091): Received CLOSE for 3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:58,946 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(959): stopping server 87c3fdb6c570,42773,1733174972192 2024-12-02T21:29:58,946 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:29:58,946 INFO [RS:0;87c3fdb6c570:42773 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87c3fdb6c570:42773. 2024-12-02T21:29:58,946 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3308cd961618a6484ed76991aef8535b, disabling compactions & flushes 2024-12-02T21:29:58,946 DEBUG [RS:0;87c3fdb6c570:42773 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:29:58,946 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:58,946 DEBUG [RS:0;87c3fdb6c570:42773 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:29:58,946 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:58,946 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. after waiting 0 ms 2024-12-02T21:29:58,947 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:29:58,947 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:58,947 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:29:58,947 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:29:58,947 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T21:29:58,947 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T21:29:58,947 DEBUG [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 3308cd961618a6484ed76991aef8535b=TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b.} 2024-12-02T21:29:58,948 DEBUG [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3308cd961618a6484ed76991aef8535b 2024-12-02T21:29:58,948 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:29:58,948 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:29:58,948 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:29:58,948 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:29:58,948 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:29:58,952 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/default/TestLogRolling-testLogRollOnPipelineRestart/3308cd961618a6484ed76991aef8535b/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-02T21:29:58,953 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:58,953 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3308cd961618a6484ed76991aef8535b: Waiting for close lock at 1733174998946Running coprocessor pre-close hooks at 1733174998946Disabling compacts and flushes for region at 1733174998946Disabling writes for close at 1733174998947 (+1 ms)Writing region close event to WAL at 1733174998948 (+1 ms)Running coprocessor post-close hooks at 1733174998953 (+5 ms)Closed at 1733174998953 2024-12-02T21:29:58,953 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-02T21:29:58,953 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733174973832.3308cd961618a6484ed76991aef8535b. 2024-12-02T21:29:58,954 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:29:58,954 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:29:58,954 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733174998947Running coprocessor pre-close hooks at 1733174998947Disabling compacts and flushes for region at 1733174998947Disabling writes for close at 1733174998948 (+1 ms)Writing region close event to WAL at 1733174998950 (+2 ms)Running coprocessor post-close hooks at 1733174998954 (+4 ms)Closed at 1733174998954 2024-12-02T21:29:58,954 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T21:29:59,071 INFO [regionserver/87c3fdb6c570:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:29:59,075 INFO [regionserver/87c3fdb6c570:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T21:29:59,075 INFO [regionserver/87c3fdb6c570:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T21:29:59,148 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(976): stopping server 87c3fdb6c570,42773,1733174972192; all regions closed. 
2024-12-02T21:29:59,149 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:59,149 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:59,150 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:59,150 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:59,150 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:29:59,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741840_1023 (size=825) 2024-12-02T21:29:59,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741840_1023 (size=825) 2024-12-02T21:29:59,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:29:59,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:00,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:00,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:01,125 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-02T21:30:01,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:01,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:01,859 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T21:30:02,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:02,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:02,756 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.meta.1733174973698.meta after 4002ms 2024-12-02T21:30:02,756 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/WALs/87c3fdb6c570,42773,1733174972192/87c3fdb6c570%2C42773%2C1733174972192.meta.1733174973698.meta to hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/oldWALs/87c3fdb6c570%2C42773%2C1733174972192.meta.1733174973698.meta 2024-12-02T21:30:02,759 DEBUG [RS:0;87c3fdb6c570:42773 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/oldWALs 2024-12-02T21:30:02,759 INFO [RS:0;87c3fdb6c570:42773 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C42773%2C1733174972192.meta:.meta(num 1733174998745) 2024-12-02T21:30:02,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:02,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:02,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:02,760 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:02,760 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:02,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741844_1028 (size=1162) 2024-12-02T21:30:02,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741844_1028 (size=1162) 2024-12-02T21:30:02,812 DEBUG [RS:0;87c3fdb6c570:42773 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/oldWALs 2024-12-02T21:30:02,812 INFO [RS:0;87c3fdb6c570:42773 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C42773%2C1733174972192:(num 1733174998850) 2024-12-02T21:30:02,812 DEBUG [RS:0;87c3fdb6c570:42773 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:30:02,812 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:30:02,812 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:30:02,812 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.ChoreService(370): Chore service for: regionserver/87c3fdb6c570:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T21:30:02,812 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:30:02,812 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T21:30:02,812 INFO [RS:0;87c3fdb6c570:42773 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42773 2024-12-02T21:30:02,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87c3fdb6c570,42773,1733174972192 2024-12-02T21:30:02,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:30:02,861 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:30:02,878 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87c3fdb6c570,42773,1733174972192] 2024-12-02T21:30:02,889 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87c3fdb6c570,42773,1733174972192 already deleted, retry=false 2024-12-02T21:30:02,889 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87c3fdb6c570,42773,1733174972192 expired; onlineServers=0 2024-12-02T21:30:02,889 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87c3fdb6c570,35035,1733174971877' ***** 2024-12-02T21:30:02,889 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T21:30:02,889 INFO [M:0;87c3fdb6c570:35035 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:30:02,889 INFO [M:0;87c3fdb6c570:35035 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:30:02,889 DEBUG [M:0;87c3fdb6c570:35035 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T21:30:02,889 DEBUG [M:0;87c3fdb6c570:35035 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T21:30:02,889 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T21:30:02,889 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174972986 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733174972986,5,FailOnTimeoutGroup] 2024-12-02T21:30:02,889 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174972985 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733174972985,5,FailOnTimeoutGroup] 2024-12-02T21:30:02,889 INFO [M:0;87c3fdb6c570:35035 {}] hbase.ChoreService(370): Chore service for: master/87c3fdb6c570:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T21:30:02,890 INFO [M:0;87c3fdb6c570:35035 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:30:02,890 DEBUG [M:0;87c3fdb6c570:35035 {}] master.HMaster(1795): Stopping service threads 2024-12-02T21:30:02,890 INFO [M:0;87c3fdb6c570:35035 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T21:30:02,890 INFO [M:0;87c3fdb6c570:35035 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:30:02,890 INFO [M:0;87c3fdb6c570:35035 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T21:30:02,890 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T21:30:02,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T21:30:02,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:02,900 DEBUG [M:0;87c3fdb6c570:35035 {}] zookeeper.ZKUtil(347): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T21:30:02,900 WARN [M:0;87c3fdb6c570:35035 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T21:30:02,900 INFO [M:0;87c3fdb6c570:35035 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/.lastflushedseqids 2024-12-02T21:30:02,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741846_1030 (size=139) 2024-12-02T21:30:02,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741846_1030 (size=139) 2024-12-02T21:30:02,906 INFO [M:0;87c3fdb6c570:35035 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T21:30:02,906 INFO [M:0;87c3fdb6c570:35035 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T21:30:02,906 DEBUG [M:0;87c3fdb6c570:35035 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:30:02,906 INFO [M:0;87c3fdb6c570:35035 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:30:02,906 DEBUG [M:0;87c3fdb6c570:35035 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:30:02,906 DEBUG [M:0;87c3fdb6c570:35035 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:30:02,906 DEBUG [M:0;87c3fdb6c570:35035 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:30:02,906 INFO [M:0;87c3fdb6c570:35035 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-12-02T21:30:02,907 ERROR [FSHLog-0-hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData-prefix:87c3fdb6c570,35035,1733174971877 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:30:02,907 WARN [FSHLog-0-hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData-prefix:87c3fdb6c570,35035,1733174971877 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:30:02,907 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 87c3fdb6c570%2C35035%2C1733174971877:(num 1733174972737) roll requested 2024-12-02T21:30:02,907 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C35035%2C1733174971877.1733175002907 2024-12-02T21:30:02,912 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:02,912 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:02,912 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:02,912 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:02,912 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:02,913 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877/87c3fdb6c570%2C35035%2C1733174971877.1733174972737 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877/87c3fdb6c570%2C35035%2C1733174971877.1733175002907 2024-12-02T21:30:02,913 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:30:02,913 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43783,DS-aed1ec14-f51e-4de0-80a2-d8d577bdbbf3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:30:02,913 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877/87c3fdb6c570%2C35035%2C1733174971877.1733174972737 2024-12-02T21:30:02,913 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46689:46689),(127.0.0.1/127.0.0.1:42675:42675)] 2024-12-02T21:30:02,913 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877/87c3fdb6c570%2C35035%2C1733174971877.1733174972737 is not closed yet, will try archiving it next time 2024-12-02T21:30:02,914 WARN [IPC Server handler 4 on default port 45281 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877/87c3fdb6c570%2C35035%2C1733174971877.1733174972737 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-12-02T21:30:02,914 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877/87c3fdb6c570%2C35035%2C1733174971877.1733174972737 after 1ms 2024-12-02T21:30:02,926 DEBUG [M:0;87c3fdb6c570:35035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d35f7916d8674a219dab7b0e3a57dfb3 is 82, key is hbase:meta,,1/info:regioninfo/1733174973732/Put/seqid=0 2024-12-02T21:30:02,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741848_1033 (size=5672) 2024-12-02T21:30:02,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741848_1033 (size=5672) 2024-12-02T21:30:02,931 INFO [M:0;87c3fdb6c570:35035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d35f7916d8674a219dab7b0e3a57dfb3 2024-12-02T21:30:02,949 DEBUG [M:0;87c3fdb6c570:35035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5001d591be8944f3a052735f68f74b44 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733174974219/Put/seqid=0 2024-12-02T21:30:02,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741849_1034 (size=6118) 2024-12-02T21:30:02,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741849_1034 (size=6118) 2024-12-02T21:30:02,954 INFO [M:0;87c3fdb6c570:35035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5001d591be8944f3a052735f68f74b44 2024-12-02T21:30:02,970 DEBUG [M:0;87c3fdb6c570:35035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1464ba21f9a246f4823222ca02fa12d5 is 69, key is 87c3fdb6c570,42773,1733174972192/rs:state/1733174973050/Put/seqid=0 2024-12-02T21:30:02,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741850_1035 (size=5156) 2024-12-02T21:30:02,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741850_1035 (size=5156) 2024-12-02T21:30:02,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:30:02,979 INFO [RS:0;87c3fdb6c570:42773 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:30:02,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42773-0x10197f349810001, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:30:02,979 INFO [RS:0;87c3fdb6c570:42773 {}] regionserver.HRegionServer(1031): Exiting; stopping=87c3fdb6c570,42773,1733174972192; zookeeper connection closed. 2024-12-02T21:30:02,979 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@653dba03 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@653dba03 2024-12-02T21:30:02,979 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T21:30:03,376 INFO [M:0;87c3fdb6c570:35035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1464ba21f9a246f4823222ca02fa12d5 2024-12-02T21:30:03,394 DEBUG [M:0;87c3fdb6c570:35035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4e0afdc9adc84535a655a463979bc57c is 52, key is load_balancer_on/state:d/1733174973827/Put/seqid=0 2024-12-02T21:30:03,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741851_1036 (size=5056) 2024-12-02T21:30:03,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741851_1036 (size=5056) 2024-12-02T21:30:03,399 INFO [M:0;87c3fdb6c570:35035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4e0afdc9adc84535a655a463979bc57c 2024-12-02T21:30:03,405 DEBUG [M:0;87c3fdb6c570:35035 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d35f7916d8674a219dab7b0e3a57dfb3 as hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d35f7916d8674a219dab7b0e3a57dfb3 2024-12-02T21:30:03,409 INFO [M:0;87c3fdb6c570:35035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d35f7916d8674a219dab7b0e3a57dfb3, entries=8, sequenceid=56, filesize=5.5 K 2024-12-02T21:30:03,410 DEBUG [M:0;87c3fdb6c570:35035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5001d591be8944f3a052735f68f74b44 as hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5001d591be8944f3a052735f68f74b44 2024-12-02T21:30:03,415 INFO [M:0;87c3fdb6c570:35035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5001d591be8944f3a052735f68f74b44, entries=6, sequenceid=56, filesize=6.0 K 2024-12-02T21:30:03,416 DEBUG [M:0;87c3fdb6c570:35035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1464ba21f9a246f4823222ca02fa12d5 as hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1464ba21f9a246f4823222ca02fa12d5 2024-12-02T21:30:03,420 INFO [M:0;87c3fdb6c570:35035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1464ba21f9a246f4823222ca02fa12d5, entries=1, sequenceid=56, filesize=5.0 K 2024-12-02T21:30:03,421 DEBUG [M:0;87c3fdb6c570:35035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4e0afdc9adc84535a655a463979bc57c as hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4e0afdc9adc84535a655a463979bc57c 2024-12-02T21:30:03,425 INFO [M:0;87c3fdb6c570:35035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4e0afdc9adc84535a655a463979bc57c, entries=1, sequenceid=56, filesize=4.9 K 2024-12-02T21:30:03,426 INFO [M:0;87c3fdb6c570:35035 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 520ms, sequenceid=56, compaction requested=false 2024-12-02T21:30:03,427 INFO [M:0;87c3fdb6c570:35035 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:30:03,427 DEBUG [M:0;87c3fdb6c570:35035 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733175002906Disabling compacts and flushes for region at 1733175002906Disabling writes for close at 1733175002906Obtaining lock to block concurrent updates at 1733175002906Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733175002906Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733175002907 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733175002914 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733175002914Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733175002926 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733175002926Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733175002936 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733175002948 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733175002948Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733175002958 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733175002970 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733175002970Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733175003381 (+411 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733175003393 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733175003393Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e2d5cc1: reopening flushed file at 1733175003404 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fd04e20: reopening flushed file at 1733175003409 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1503cc73: reopening flushed file at 1733175003415 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7faee8c2: reopening flushed file at 1733175003421 (+6 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 520ms, sequenceid=56, compaction requested=false at 1733175003426 (+5 ms)Writing region close event to WAL at 1733175003427 (+1 ms)Closed at 1733175003427 2024-12-02T21:30:03,428 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:03,428 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:03,428 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:03,428 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:03,428 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:30:03,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741847_1031 (size=757) 2024-12-02T21:30:03,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40743 is added to blk_1073741847_1031 (size=757) 2024-12-02T21:30:03,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:03,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:03,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:03,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,123 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-02T21:30:04,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:04,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:04,478 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:30:04,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,494 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,494 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,494 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,498 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,499 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,499 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:04,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:05,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:05,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:06,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:30:06,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T21:30:06,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T21:30:06,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-02T21:30:06,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:06,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:06,914 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877/87c3fdb6c570%2C35035%2C1733174971877.1733174972737 after 4001ms 2024-12-02T21:30:06,915 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/WALs/87c3fdb6c570,35035,1733174971877/87c3fdb6c570%2C35035%2C1733174971877.1733174972737 to hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/oldWALs/87c3fdb6c570%2C35035%2C1733174971877.1733174972737 2024-12-02T21:30:06,918 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/MasterData/oldWALs/87c3fdb6c570%2C35035%2C1733174971877.1733174972737 to hdfs://localhost:45281/user/jenkins/test-data/92cc6d09-a9b3-a152-3bde-ddb60d3a169a/oldWALs/87c3fdb6c570%2C35035%2C1733174971877.1733174972737$masterlocalwal$ 2024-12-02T21:30:06,918 INFO [M:0;87c3fdb6c570:35035 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T21:30:06,918 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T21:30:06,918 INFO [M:0;87c3fdb6c570:35035 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35035 2024-12-02T21:30:06,918 INFO [M:0;87c3fdb6c570:35035 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:30:07,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:30:07,042 INFO [M:0;87c3fdb6c570:35035 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:30:07,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35035-0x10197f349810000, quorum=127.0.0.1:51794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:30:07,046 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75d9c3d8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:30:07,046 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21ffcd24{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:30:07,046 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:30:07,047 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e79e191{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:30:07,047 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20da775a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,STOPPED} 2024-12-02T21:30:07,049 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 
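The repeated "Failed invocation" WARNs above, followed by "Recovered lease, attempt=1 ... after 4001ms", come from the WAL close path asking HDFS to recover the file lease and then polling, roughly once per second, whether the NameNode considers the file closed; the poll goes through reflection (hence the InvocationTargetException wrapping the real cause, "java.io.IOException: Filesystem closed"). The snippet below is a minimal, self-contained sketch of that polling pattern, not the actual RecoverLeaseFSUtils code: the class name, timeout parameter, and fallback logic are illustrative, while the recoverLease()/isFileClosed() calls, the reflective lookup, and the one-second cadence are taken from the traces in this log.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/**
 * Minimal sketch (NOT the real RecoverLeaseFSUtils): trigger lease recovery on an
 * HDFS file and poll isFileClosed() until the NameNode reports the file closed.
 * isFileClosed is resolved reflectively, which is why a failure in the log above
 * surfaces as an InvocationTargetException wrapping the real cause.
 */
public final class LeaseRecoverySketch {

  public static boolean recoverLease(FileSystem fs, Path path, long timeoutMs)
      throws IOException, InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on a local or non-HDFS filesystem
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;

    // Ask the NameNode to start lease recovery; it may succeed immediately.
    if (dfs.recoverLease(path)) {
      return true;
    }

    Method isFileClosed = null;
    try {
      // Looked up reflectively so the caller can still run against Hadoop
      // versions that predate DistributedFileSystem#isFileClosed.
      isFileClosed = dfs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      // Fall back to re-calling recoverLease() in the loop below.
    }

    while (System.currentTimeMillis() < deadline) {
      try {
        if (isFileClosed != null
            ? (Boolean) isFileClosed.invoke(dfs, path)
            : dfs.recoverLease(path)) {
          return true;
        }
      } catch (IllegalAccessException | InvocationTargetException e) {
        // e.getCause() is the real error, e.g. "Filesystem closed" when the
        // DFSClient was already shut down, as in the WARNs above; keep polling.
      }
      Thread.sleep(1000L); // matches the roughly one-second retry cadence in the log
    }
    return false;
  }
}
```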
2024-12-02T21:30:07,049 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T21:30:07,049 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-725672116-172.17.0.3-1733174969661 (Datanode Uuid 471ad00b-f454-4e19-9fa8-4a8c791495a3) service to localhost/127.0.0.1:45281 2024-12-02T21:30:07,049 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:30:07,050 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data3/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:30:07,051 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data4/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:30:07,051 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:30:07,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@424d5648{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:30:07,054 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a0cdfff{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:30:07,054 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:30:07,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21d94b42{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:30:07,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61aceb4d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,STOPPED} 2024-12-02T21:30:07,055 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:30:07,055 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
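These block-pool, command-processor, and Jetty shutdown lines (ending in "Minicluster is down" just below) are the normal teardown of the HDFS/HBase mini cluster driven by the test utility. The following is a minimal lifecycle sketch under the assumption that HBaseTestingUtil keeps the familiar testing-utility helpers; only startMiniCluster() and shutdownMiniCluster() are taken from this log, and the class, table, and family names are illustrative, not part of TestLogRolling.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 * Minimal sketch (not TestLogRolling itself) of the mini-cluster lifecycle that
 * produces the startup and teardown lines seen in this log.
 */
public class MiniClusterLifecycleSketch {

  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Starts ZooKeeper, an HDFS mini cluster, and an HBase master + region server;
    // logs "Starting up minicluster with option: StartMiniClusterOption{...}".
    UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Stops region servers, master, datanodes, and ZooKeeper; the datanode/Jetty
    // shutdown lines above and "Minicluster is down" come from this step.
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void clusterComesUp() throws Exception {
    // Illustrative smoke check only: create and drop a throw-away table
    // (assumes the standard createTable/deleteTable convenience helpers).
    TableName name = TableName.valueOf("lifecycle_sketch");
    UTIL.createTable(name, "cf");
    UTIL.deleteTable(name);
  }
}
```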
2024-12-02T21:30:07,055 WARN [BP-725672116-172.17.0.3-1733174969661 heartbeating to localhost/127.0.0.1:45281 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-725672116-172.17.0.3-1733174969661 (Datanode Uuid 522a7cfd-5a57-46ca-91ce-4908e9afdae4) service to localhost/127.0.0.1:45281 2024-12-02T21:30:07,055 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:30:07,055 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data1/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:30:07,056 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/cluster_41b8feda-0a18-765a-6123-49f20111c97b/data/data2/current/BP-725672116-172.17.0.3-1733174969661 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:30:07,056 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:30:07,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@57204301{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:30:07,060 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@131b09a7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:30:07,060 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:30:07,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e0e18a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:30:07,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60017892{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir/,STOPPED} 2024-12-02T21:30:07,065 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:30:07,083 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T21:30:07,090 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 157) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45281 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45281 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45281 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45281 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:45281 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45281 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45281 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45281 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 448) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=99 (was 130), ProcessCount=11 (was 11), AvailableMemoryMB=6683 (was 6830) 2024-12-02T21:30:07,097 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=182, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=99, ProcessCount=11, AvailableMemoryMB=6683 2024-12-02T21:30:07,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:30:07,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.log.dir so I do NOT create it in target/test-data/2eb8023b-3b47-7365-d440-63704f723d86 2024-12-02T21:30:07,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb4fd81b-3e07-2cab-d383-04f94a497870/hadoop.tmp.dir so I do NOT create it in target/test-data/2eb8023b-3b47-7365-d440-63704f723d86 2024-12-02T21:30:07,098 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9, deleteOnExit=true 2024-12-02T21:30:07,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T21:30:07,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/test.cache.data in system properties and HBase conf 2024-12-02T21:30:07,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:30:07,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:30:07,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:30:07,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:30:07,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T21:30:07,098 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system 
is not a DistributedFileSystem. Skipping on block location reordering 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:30:07,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:30:07,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:30:07,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:30:07,112 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:30:07,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:07,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:07,586 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:30:07,590 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:30:07,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:30:07,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:30:07,591 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:30:07,592 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:30:07,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@293e66d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:30:07,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ac76b28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:30:07,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d483d07{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/java.io.tmpdir/jetty-localhost-37417-hadoop-hdfs-3_4_1-tests_jar-_-any-904010600095868135/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:30:07,680 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1106c0e7{HTTP/1.1, (http/1.1)}{localhost:37417} 2024-12-02T21:30:07,680 INFO [Time-limited test {}] server.Server(415): Started @197673ms 2024-12-02T21:30:07,691 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:30:07,914 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:30:07,917 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:30:07,917 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:30:07,917 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:30:07,917 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:30:07,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cfed7b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:30:07,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@439cbd97{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:30:08,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43d16ee8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/java.io.tmpdir/jetty-localhost-44621-hadoop-hdfs-3_4_1-tests_jar-_-any-16273537456691377298/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:30:08,008 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b135886{HTTP/1.1, (http/1.1)}{localhost:44621} 2024-12-02T21:30:08,008 INFO [Time-limited test {}] server.Server(415): Started @198001ms 2024-12-02T21:30:08,009 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:30:08,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:30:08,037 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:30:08,037 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:30:08,037 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:30:08,037 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:30:08,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10c2896a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:30:08,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bd1d692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:30:08,137 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2526c219{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/java.io.tmpdir/jetty-localhost-44761-hadoop-hdfs-3_4_1-tests_jar-_-any-14466452873972471335/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:30:08,137 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74e6f5d9{HTTP/1.1, (http/1.1)}{localhost:44761} 2024-12-02T21:30:08,137 INFO [Time-limited test {}] server.Server(415): Started @198130ms 2024-12-02T21:30:08,138 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:30:08,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:08,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:09,085 WARN [Thread-1663 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9/data/data2/current/BP-1919574500-172.17.0.3-1733175007122/current, will proceed with Du for space computation calculation, 2024-12-02T21:30:09,085 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9/data/data1/current/BP-1919574500-172.17.0.3-1733175007122/current, will proceed with Du for space computation calculation, 2024-12-02T21:30:09,108 WARN [Thread-1626 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:30:09,110 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48df749aec9e3a8f with lease ID 0x180f02250a2b17e: Processing first storage report for DS-354b3088-84e1-4105-8651-b4fef35f0df4 from datanode DatanodeRegistration(127.0.0.1:38657, datanodeUuid=daf7bd07-636b-4e68-ad9b-a40179411800, infoPort=34197, infoSecurePort=0, ipcPort=44775, storageInfo=lv=-57;cid=testClusterID;nsid=468225152;c=1733175007122) 2024-12-02T21:30:09,110 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48df749aec9e3a8f with lease ID 0x180f02250a2b17e: from storage DS-354b3088-84e1-4105-8651-b4fef35f0df4 node DatanodeRegistration(127.0.0.1:38657, datanodeUuid=daf7bd07-636b-4e68-ad9b-a40179411800, infoPort=34197, infoSecurePort=0, ipcPort=44775, storageInfo=lv=-57;cid=testClusterID;nsid=468225152;c=1733175007122), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:30:09,111 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48df749aec9e3a8f with lease ID 0x180f02250a2b17e: Processing first storage report for DS-044b63c3-db7a-4e4d-aa10-32cfa3073bff from datanode DatanodeRegistration(127.0.0.1:38657, datanodeUuid=daf7bd07-636b-4e68-ad9b-a40179411800, infoPort=34197, infoSecurePort=0, ipcPort=44775, storageInfo=lv=-57;cid=testClusterID;nsid=468225152;c=1733175007122) 2024-12-02T21:30:09,111 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48df749aec9e3a8f with lease ID 0x180f02250a2b17e: from storage DS-044b63c3-db7a-4e4d-aa10-32cfa3073bff node DatanodeRegistration(127.0.0.1:38657, datanodeUuid=daf7bd07-636b-4e68-ad9b-a40179411800, infoPort=34197, infoSecurePort=0, ipcPort=44775, storageInfo=lv=-57;cid=testClusterID;nsid=468225152;c=1733175007122), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:30:09,220 WARN [Thread-1673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9/data/data3/current/BP-1919574500-172.17.0.3-1733175007122/current, will proceed with Du for space computation calculation, 2024-12-02T21:30:09,220 WARN [Thread-1674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9/data/data4/current/BP-1919574500-172.17.0.3-1733175007122/current, will proceed with Du for space computation calculation, 2024-12-02T21:30:09,239 WARN [Thread-1649 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:30:09,241 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe7b7c8e368338b06 with lease ID 0x180f02250a2b17f: Processing first storage report for DS-96a72d2e-a84c-40ad-a94f-5037b85eae9f from datanode DatanodeRegistration(127.0.0.1:43761, datanodeUuid=5ead784c-8339-44f4-97e4-2e9f05bc7950, infoPort=41129, infoSecurePort=0, ipcPort=38995, storageInfo=lv=-57;cid=testClusterID;nsid=468225152;c=1733175007122) 2024-12-02T21:30:09,241 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe7b7c8e368338b06 with lease ID 0x180f02250a2b17f: from storage DS-96a72d2e-a84c-40ad-a94f-5037b85eae9f node DatanodeRegistration(127.0.0.1:43761, datanodeUuid=5ead784c-8339-44f4-97e4-2e9f05bc7950, infoPort=41129, infoSecurePort=0, ipcPort=38995, storageInfo=lv=-57;cid=testClusterID;nsid=468225152;c=1733175007122), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:30:09,241 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe7b7c8e368338b06 with lease ID 0x180f02250a2b17f: Processing first storage report for DS-e33cc8ee-6132-48e8-bf3f-36bcfed29802 from datanode DatanodeRegistration(127.0.0.1:43761, datanodeUuid=5ead784c-8339-44f4-97e4-2e9f05bc7950, infoPort=41129, infoSecurePort=0, ipcPort=38995, storageInfo=lv=-57;cid=testClusterID;nsid=468225152;c=1733175007122) 2024-12-02T21:30:09,241 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe7b7c8e368338b06 with lease ID 0x180f02250a2b17f: from storage DS-e33cc8ee-6132-48e8-bf3f-36bcfed29802 node DatanodeRegistration(127.0.0.1:43761, datanodeUuid=5ead784c-8339-44f4-97e4-2e9f05bc7950, infoPort=41129, infoSecurePort=0, ipcPort=38995, storageInfo=lv=-57;cid=testClusterID;nsid=468225152;c=1733175007122), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:30:09,268 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86 2024-12-02T21:30:09,271 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9/zookeeper_0, clientPort=62166, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:30:09,272 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62166 2024-12-02T21:30:09,272 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:30:09,274 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:30:09,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:30:09,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:30:09,284 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79 with version=8 2024-12-02T21:30:09,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/hbase-staging 2024-12-02T21:30:09,286 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:30:09,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:30:09,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:30:09,286 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:30:09,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:30:09,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:30:09,286 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T21:30:09,287 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:30:09,287 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41399 2024-12-02T21:30:09,288 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41399 connecting to ZooKeeper ensemble=127.0.0.1:62166 2024-12-02T21:30:09,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:413990x0, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-02T21:30:09,342 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41399-0x10197f3dba20000 connected 2024-12-02T21:30:09,425 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:30:09,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:30:09,428 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:30:09,428 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79, hbase.cluster.distributed=false 2024-12-02T21:30:09,430 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:30:09,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41399 2024-12-02T21:30:09,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41399 2024-12-02T21:30:09,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41399 2024-12-02T21:30:09,431 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41399 2024-12-02T21:30:09,431 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41399 2024-12-02T21:30:09,443 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:30:09,443 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:30:09,443 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:30:09,444 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:30:09,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:30:09,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:30:09,444 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:30:09,444 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:30:09,445 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37609 2024-12-02T21:30:09,446 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37609 connecting to ZooKeeper ensemble=127.0.0.1:62166 2024-12-02T21:30:09,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:09,447 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:30:09,448 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:30:09,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:09,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376090x0, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:30:09,457 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37609-0x10197f3dba20001 connected 2024-12-02T21:30:09,458 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:30:09,458 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:30:09,458 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:30:09,459 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:30:09,460 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:30:09,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37609 2024-12-02T21:30:09,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37609 2024-12-02T21:30:09,465 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37609 2024-12-02T21:30:09,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37609 2024-12-02T21:30:09,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37609 2024-12-02T21:30:09,476 DEBUG [M:0;87c3fdb6c570:41399 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87c3fdb6c570:41399 2024-12-02T21:30:09,477 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87c3fdb6c570,41399,1733175009286 2024-12-02T21:30:09,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:30:09,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:30:09,489 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87c3fdb6c570,41399,1733175009286 2024-12-02T21:30:09,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:09,499 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:30:09,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:09,500 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:30:09,500 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87c3fdb6c570,41399,1733175009286 from backup master directory 2024-12-02T21:30:09,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:30:09,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87c3fdb6c570,41399,1733175009286 2024-12-02T21:30:09,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:30:09,510 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
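The entries above show the would-be master creating a znode under /hbase/backup-masters, setting a watcher on /hbase/master, and then deleting its backup entry as it moves toward becoming active. As a minimal illustrative sketch only (the plain ZooKeeper client API, not HBase's ActiveMasterManager code path), that register-and-watch pattern might look like the following; the ensemble address, session timeout, and znode name are copied from the log, and everything else is assumed for the example.

    // Illustrative sketch: register an ephemeral backup-master znode and watch
    // /hbase/master, mirroring the pattern in the log entries above.
    // Uses the plain ZooKeeper client API; the parent znodes are assumed to exist.
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class BackupMasterZNodeSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:62166", 30000,
                event -> System.out.println("Received ZooKeeper Event " + event));
            // Ephemeral node: tied to this session, so it disappears when the
            // session expires (the log's MTTR warning is about waiting for that).
            zk.create("/hbase/backup-masters/87c3fdb6c570,41399,1733175009286",
                new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            // Watch the active-master znode so a NodeCreated/NodeDeleted event fires.
            zk.exists("/hbase/master", true);
            Thread.sleep(5_000); // keep the session alive briefly for the demo
            zk.close();
        }
    }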
2024-12-02T21:30:09,510 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87c3fdb6c570,41399,1733175009286 2024-12-02T21:30:09,514 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/hbase.id] with ID: ab057b67-d100-450e-b375-bc8de7103de8 2024-12-02T21:30:09,514 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/.tmp/hbase.id 2024-12-02T21:30:09,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:30:09,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:30:09,521 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/.tmp/hbase.id]:[hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/hbase.id] 2024-12-02T21:30:09,532 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:30:09,532 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T21:30:09,534 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
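The cluster-ID entries above describe writing hbase.id to a temporary path under .tmp and then moving it to its final location. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API follows; the class and method names are invented for the example, and the HDFS URI, root directory, and ID value are taken from the log.

    // Sketch of "write to a temp path, then rename into place" as described in
    // the log. FsUtilsSketch/writeClusterId are made-up names, not HBase's FSUtils.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FsUtilsSketch {
        public static void writeClusterId(FileSystem fs, Path rootDir, String clusterId)
                throws java.io.IOException {
            Path tmp = new Path(rootDir, ".tmp/hbase.id");
            Path target = new Path(rootDir, "hbase.id");
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write(clusterId.getBytes(StandardCharsets.UTF_8));
            }
            // The rename is a single metadata operation on HDFS, so readers never
            // observe a half-written hbase.id file.
            if (!fs.rename(tmp, target)) {
                throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(new java.net.URI("hdfs://localhost:32819"), conf);
            writeClusterId(fs,
                new Path("/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79"),
                "ab057b67-d100-450e-b375-bc8de7103de8");
        }
    }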
2024-12-02T21:30:09,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:09,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:09,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:30:09,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:30:09,548 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:30:09,549 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T21:30:09,549 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:30:09,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:30:09,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:30:09,556 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store 2024-12-02T21:30:09,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:30:09,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:30:09,563 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:30:09,563 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:30:09,563 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:30:09,563 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:30:09,563 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:30:09,563 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:30:09,563 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
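The preceding entries print the full master:store table descriptor (families info, proc, rs, state) before the region is instantiated and closed. As a rough sketch only, and not the MasterRegion code path, a descriptor with the same 'info' and 'proc' family settings could be expressed with the public client builder API as shown below; the 'rs' and 'state' families follow the same pattern as 'proc', and the class name is made up for the example.

    // Illustrative only: the 'info' and 'proc' column-family settings from the
    // descriptor printed in the log, written with the public builder API.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)                                   // VERSIONS => '3'
                    .setInMemory(true)                                   // IN_MEMORY => 'true'
                    .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8KB
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                    .setMaxVersions(1)                                   // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW)
                    .build())
                .build();
        }

        public static void main(String[] args) {
            System.out.println(build());
        }
    }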
2024-12-02T21:30:09,563 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733175009563Disabling compacts and flushes for region at 1733175009563Disabling writes for close at 1733175009563Writing region close event to WAL at 1733175009563Closed at 1733175009563 2024-12-02T21:30:09,564 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/.initializing 2024-12-02T21:30:09,564 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/WALs/87c3fdb6c570,41399,1733175009286 2024-12-02T21:30:09,566 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C41399%2C1733175009286, suffix=, logDir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/WALs/87c3fdb6c570,41399,1733175009286, archiveDir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/oldWALs, maxLogs=10 2024-12-02T21:30:09,567 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C41399%2C1733175009286.1733175009567 2024-12-02T21:30:09,571 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/WALs/87c3fdb6c570,41399,1733175009286/87c3fdb6c570%2C41399%2C1733175009286.1733175009567 2024-12-02T21:30:09,571 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34197:34197),(127.0.0.1/127.0.0.1:41129:41129)] 2024-12-02T21:30:09,572 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:30:09,572 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:30:09,572 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,572 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,573 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:30:09,575 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:09,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:30:09,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:30:09,576 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:09,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:30:09,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:30:09,578 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:09,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:30:09,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,580 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:30:09,580 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:09,580 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:30:09,580 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,581 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,582 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,583 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,583 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,584 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:30:09,586 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:30:09,588 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:30:09,589 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=858811, jitterRate=0.09203556180000305}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:30:09,590 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733175009572Initializing all the Stores at 1733175009573 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175009573Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175009573Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175009573Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175009573Cleaning up temporary data from old regions at 1733175009583 (+10 ms)Region opened successfully at 1733175009590 (+7 ms) 2024-12-02T21:30:09,590 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:30:09,593 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ed5875b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:30:09,595 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T21:30:09,595 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:30:09,595 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:30:09,595 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:30:09,595 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T21:30:09,596 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T21:30:09,596 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:30:09,598 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T21:30:09,599 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:30:09,604 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:30:09,604 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:30:09,605 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:30:09,614 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:30:09,615 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:30:09,616 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:30:09,625 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:30:09,626 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:30:09,636 DEBUG 
[master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:30:09,637 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:30:09,646 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:30:09,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:30:09,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:30:09,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:09,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:09,657 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87c3fdb6c570,41399,1733175009286, sessionid=0x10197f3dba20000, setting cluster-up flag (Was=false) 2024-12-02T21:30:09,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:09,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:09,710 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:30:09,711 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,41399,1733175009286 2024-12-02T21:30:09,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:09,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:09,762 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:30:09,766 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,41399,1733175009286 2024-12-02T21:30:09,770 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T21:30:09,773 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T21:30:09,773 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T21:30:09,773 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T21:30:09,774 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87c3fdb6c570,41399,1733175009286 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:30:09,775 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:30:09,775 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:30:09,775 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:30:09,775 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:30:09,776 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87c3fdb6c570:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:30:09,776 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,776 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:30:09,776 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, 
maxPoolSize=1 2024-12-02T21:30:09,777 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733175039776 2024-12-02T21:30:09,777 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:30:09,777 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:30:09,777 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:30:09,777 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:30:09,777 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:30:09,777 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:30:09,777 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,777 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:30:09,777 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:30:09,777 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:30:09,778 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:30:09,778 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:30:09,778 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:30:09,778 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:30:09,778 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733175009778,5,FailOnTimeoutGroup] 2024-12-02T21:30:09,778 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733175009778,5,FailOnTimeoutGroup] 2024-12-02T21:30:09,778 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,778 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:30:09,778 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,778 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,779 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:09,779 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:30:09,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:30:09,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:30:09,787 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T21:30:09,787 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79 2024-12-02T21:30:09,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:30:09,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:30:09,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:30:09,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:30:09,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:30:09,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:09,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:30:09,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:30:09,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:30:09,801 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:09,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:30:09,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:30:09,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:30:09,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:09,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:30:09,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:30:09,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:30:09,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:09,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:30:09,804 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:30:09,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740 2024-12-02T21:30:09,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740 2024-12-02T21:30:09,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:30:09,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:30:09,807 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-02T21:30:09,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:30:09,810 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:30:09,811 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=816721, jitterRate=0.03851470351219177}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:30:09,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733175009796Initializing all the Stores at 1733175009797 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175009797Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175009798 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175009798Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175009798Cleaning up temporary data from old regions at 1733175009807 (+9 ms)Region opened successfully at 1733175009811 (+4 ms) 2024-12-02T21:30:09,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:30:09,811 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:30:09,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:30:09,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:30:09,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:30:09,812 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:30:09,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733175009811Disabling compacts and flushes for region at 1733175009811Disabling writes for close at 1733175009811Writing 
region close event to WAL at 1733175009812 (+1 ms)Closed at 1733175009812 2024-12-02T21:30:09,813 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:30:09,813 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T21:30:09,813 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:30:09,814 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:30:09,815 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:30:09,868 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(746): ClusterId : ab057b67-d100-450e-b375-bc8de7103de8 2024-12-02T21:30:09,868 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:30:09,879 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:30:09,879 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:30:09,889 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:30:09,890 DEBUG [RS:0;87c3fdb6c570:37609 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66903052, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:30:09,900 DEBUG [RS:0;87c3fdb6c570:37609 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87c3fdb6c570:37609 2024-12-02T21:30:09,900 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T21:30:09,900 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T21:30:09,900 DEBUG [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T21:30:09,901 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(2659): reportForDuty to master=87c3fdb6c570,41399,1733175009286 with port=37609, startcode=1733175009443 2024-12-02T21:30:09,901 DEBUG [RS:0;87c3fdb6c570:37609 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:30:09,903 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36585, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:30:09,903 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41399 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87c3fdb6c570,37609,1733175009443 2024-12-02T21:30:09,903 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41399 {}] master.ServerManager(517): Registering regionserver=87c3fdb6c570,37609,1733175009443 2024-12-02T21:30:09,905 DEBUG [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79 2024-12-02T21:30:09,905 DEBUG [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32819 2024-12-02T21:30:09,905 DEBUG [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T21:30:09,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:30:09,910 DEBUG [RS:0;87c3fdb6c570:37609 {}] zookeeper.ZKUtil(111): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87c3fdb6c570,37609,1733175009443 2024-12-02T21:30:09,910 WARN [RS:0;87c3fdb6c570:37609 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T21:30:09,910 INFO [RS:0;87c3fdb6c570:37609 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:30:09,910 DEBUG [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443 2024-12-02T21:30:09,910 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87c3fdb6c570,37609,1733175009443] 2024-12-02T21:30:09,914 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:30:09,915 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:30:09,915 INFO [RS:0;87c3fdb6c570:37609 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:30:09,915 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-02T21:30:09,916 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T21:30:09,916 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T21:30:09,916 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,917 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,917 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,917 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,917 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,917 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,917 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:30:09,917 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,917 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,917 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,917 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,917 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,918 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:30:09,918 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:30:09,918 DEBUG [RS:0;87c3fdb6c570:37609 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:30:09,919 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-02T21:30:09,919 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,919 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,919 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,919 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,919 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,37609,1733175009443-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:30:09,933 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:30:09,933 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,37609,1733175009443-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,933 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,933 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.Replication(171): 87c3fdb6c570,37609,1733175009443 started 2024-12-02T21:30:09,945 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:09,946 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(1482): Serving as 87c3fdb6c570,37609,1733175009443, RpcServer on 87c3fdb6c570/172.17.0.3:37609, sessionid=0x10197f3dba20001 2024-12-02T21:30:09,946 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:30:09,946 DEBUG [RS:0;87c3fdb6c570:37609 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87c3fdb6c570,37609,1733175009443 2024-12-02T21:30:09,946 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,37609,1733175009443' 2024-12-02T21:30:09,946 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:30:09,946 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:30:09,947 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:30:09,947 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:30:09,947 DEBUG [RS:0;87c3fdb6c570:37609 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87c3fdb6c570,37609,1733175009443 2024-12-02T21:30:09,947 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,37609,1733175009443' 2024-12-02T21:30:09,947 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:30:09,947 DEBUG 
[RS:0;87c3fdb6c570:37609 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:30:09,947 DEBUG [RS:0;87c3fdb6c570:37609 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:30:09,947 INFO [RS:0;87c3fdb6c570:37609 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:30:09,947 INFO [RS:0;87c3fdb6c570:37609 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T21:30:09,966 WARN [87c3fdb6c570:41399 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T21:30:10,050 INFO [RS:0;87c3fdb6c570:37609 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C37609%2C1733175009443, suffix=, logDir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443, archiveDir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/oldWALs, maxLogs=32 2024-12-02T21:30:10,051 INFO [RS:0;87c3fdb6c570:37609 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C37609%2C1733175009443.1733175010050 2024-12-02T21:30:10,059 INFO [RS:0;87c3fdb6c570:37609 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.1733175010050 2024-12-02T21:30:10,060 DEBUG [RS:0;87c3fdb6c570:37609 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34197:34197),(127.0.0.1/127.0.0.1:41129:41129)] 2024-12-02T21:30:10,216 DEBUG [87c3fdb6c570:41399 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:30:10,217 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87c3fdb6c570,37609,1733175009443 2024-12-02T21:30:10,220 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,37609,1733175009443, state=OPENING 2024-12-02T21:30:10,267 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:30:10,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:10,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:30:10,279 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:30:10,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:30:10,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:30:10,280 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,37609,1733175009443}] 2024-12-02T21:30:10,436 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:30:10,439 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50915, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:30:10,446 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T21:30:10,446 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:30:10,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:10,450 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C37609%2C1733175009443.meta, suffix=.meta, logDir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443, archiveDir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/oldWALs, maxLogs=32 2024-12-02T21:30:10,450 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C37609%2C1733175009443.meta.1733175010450.meta 2024-12-02T21:30:10,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:10,456 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.meta.1733175010450.meta 2024-12-02T21:30:10,457 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41129:41129),(127.0.0.1/127.0.0.1:34197:34197)] 2024-12-02T21:30:10,458 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:30:10,458 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:30:10,459 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:30:10,459 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T21:30:10,459 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:30:10,459 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:30:10,459 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T21:30:10,459 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T21:30:10,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:30:10,462 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:30:10,462 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:10,462 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:30:10,462 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:30:10,463 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:30:10,463 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:10,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:30:10,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:30:10,464 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:30:10,465 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:10,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-02T21:30:10,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:30:10,466 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:30:10,466 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:10,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:30:10,467 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:30:10,468 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740 2024-12-02T21:30:10,469 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740 2024-12-02T21:30:10,471 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:30:10,471 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:30:10,472 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
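The CompactionConfiguration entries above show the store-level compaction settings in effect for the meta region's column families (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2). As a rough, illustrative sketch only, those values correspond to the stock HBase configuration keys below; the key names are the standard ones and are an assumption about this run's defaults, not something read from the test's configuration files.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSettingsSketch {
  // Build a Configuration carrying the compaction values echoed in the log entries above.
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio 1.200000
    return conf;
  }
}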
2024-12-02T21:30:10,473 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:30:10,474 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745095, jitterRate=-0.05256354808807373}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:30:10,474 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T21:30:10,474 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733175010459Writing region info on filesystem at 1733175010459Initializing all the Stores at 1733175010460 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175010460Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175010460Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175010460Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175010460Cleaning up temporary data from old regions at 1733175010471 (+11 ms)Running coprocessor post-open hooks at 1733175010474 (+3 ms)Region opened successfully at 1733175010474 2024-12-02T21:30:10,475 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733175010436 2024-12-02T21:30:10,478 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:30:10,478 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T21:30:10,479 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=87c3fdb6c570,37609,1733175009443 2024-12-02T21:30:10,480 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,37609,1733175009443, state=OPEN 2024-12-02T21:30:10,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:30:10,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:30:10,520 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87c3fdb6c570,37609,1733175009443 2024-12-02T21:30:10,521 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:30:10,521 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:30:10,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:30:10,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,37609,1733175009443 in 241 msec 2024-12-02T21:30:10,526 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:30:10,526 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 710 msec 2024-12-02T21:30:10,527 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:30:10,527 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T21:30:10,528 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:30:10,528 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,37609,1733175009443, seqNum=-1] 2024-12-02T21:30:10,528 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:30:10,530 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37663, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:30:10,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 762 msec 2024-12-02T21:30:10,535 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733175010535, completionTime=-1 2024-12-02T21:30:10,535 INFO 
[master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:30:10,536 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T21:30:10,537 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T21:30:10,538 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733175070538 2024-12-02T21:30:10,538 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733175130538 2024-12-02T21:30:10,538 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-02T21:30:10,538 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,41399,1733175009286-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:10,538 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,41399,1733175009286-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:10,538 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,41399,1733175009286-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:10,538 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87c3fdb6c570:41399, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:10,538 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:10,539 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:10,540 DEBUG [master/87c3fdb6c570:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T21:30:10,542 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.032sec 2024-12-02T21:30:10,542 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:30:10,542 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:30:10,542 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:30:10,542 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-02T21:30:10,542 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:30:10,542 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,41399,1733175009286-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:30:10,542 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,41399,1733175009286-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:30:10,545 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:30:10,545 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:30:10,545 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,41399,1733175009286-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:30:10,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74cca69f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:30:10,568 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87c3fdb6c570,41399,-1 for getting cluster id 2024-12-02T21:30:10,568 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T21:30:10,570 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ab057b67-d100-450e-b375-bc8de7103de8' 2024-12-02T21:30:10,570 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T21:30:10,570 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ab057b67-d100-450e-b375-bc8de7103de8" 2024-12-02T21:30:10,571 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee17c75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:30:10,571 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87c3fdb6c570,41399,-1] 2024-12-02T21:30:10,571 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T21:30:10,571 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:30:10,572 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52794, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T21:30:10,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3764e08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:30:10,573 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:30:10,574 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,37609,1733175009443, seqNum=-1] 2024-12-02T21:30:10,575 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:30:10,575 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47930, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:30:10,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87c3fdb6c570,41399,1733175009286 2024-12-02T21:30:10,577 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:30:10,580 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T21:30:10,580 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T21:30:10,581 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 87c3fdb6c570,41399,1733175009286 2024-12-02T21:30:10,581 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4454535 2024-12-02T21:30:10,581 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T21:30:10,582 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52808, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T21:30:10,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T21:30:10,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
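The two TableDescriptorChecker warnings above are the expected side effect of the deliberately tiny region and memstore sizes this test runs with. As a hedged sketch (the property names and values come straight from the warnings; the surrounding setup code is assumed, not taken from the test source), the same values would typically be applied to the cluster Configuration before the minicluster starts:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallRegionSettingsSketch {
  // The warnings above flag exactly these two properties/values as "too small".
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.max.filesize", 786432L);      // triggers the MAX_FILESIZE warning
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // triggers the MEMSTORE_FLUSHSIZE warning
    return conf;
  }
}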
2024-12-02T21:30:10,583 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:30:10,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:30:10,586 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:30:10,586 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:10,586 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-02T21:30:10,587 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:30:10,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T21:30:10,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741835_1011 (size=405) 2024-12-02T21:30:10,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741835_1011 (size=405) 2024-12-02T21:30:10,595 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5de0817ca3d807b8d3f50574951d9f2a, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79 2024-12-02T21:30:10,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741836_1012 (size=88) 2024-12-02T21:30:10,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38657 is added to blk_1073741836_1012 (size=88) 2024-12-02T21:30:10,605 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:30:10,605 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 5de0817ca3d807b8d3f50574951d9f2a, disabling compactions & flushes 2024-12-02T21:30:10,605 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 2024-12-02T21:30:10,605 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 2024-12-02T21:30:10,605 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. after waiting 0 ms 2024-12-02T21:30:10,605 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 2024-12-02T21:30:10,605 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 2024-12-02T21:30:10,605 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5de0817ca3d807b8d3f50574951d9f2a: Waiting for close lock at 1733175010605Disabling compacts and flushes for region at 1733175010605Disabling writes for close at 1733175010605Writing region close event to WAL at 1733175010605Closed at 1733175010605 2024-12-02T21:30:10,607 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:30:10,607 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733175010607"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733175010607"}]},"ts":"1733175010607"} 2024-12-02T21:30:10,609 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
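For readers tracing the CreateTableProcedure above, the logged table descriptor maps onto the HBase client Admin API roughly as below. This is an illustrative sketch only: the family settings are copied from the descriptor printed in the log, while the connection boilerplate is generic and not taken from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family 'info' as printed in the descriptor above.
      ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(1)                 // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
          .setBlocksize(65536)               // BLOCKSIZE => '65536 B (64KB)'
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setColumnFamily(info)
          .build();
      // Shows up in the log as CreateTableProcedure pid=4 on the master.
      admin.createTable(table);
    }
  }
}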
2024-12-02T21:30:10,610 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:30:10,610 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733175010610"}]},"ts":"1733175010610"} 2024-12-02T21:30:10,612 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-02T21:30:10,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5de0817ca3d807b8d3f50574951d9f2a, ASSIGN}] 2024-12-02T21:30:10,614 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5de0817ca3d807b8d3f50574951d9f2a, ASSIGN 2024-12-02T21:30:10,615 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5de0817ca3d807b8d3f50574951d9f2a, ASSIGN; state=OFFLINE, location=87c3fdb6c570,37609,1733175009443; forceNewPlan=false, retain=false 2024-12-02T21:30:10,766 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5de0817ca3d807b8d3f50574951d9f2a, regionState=OPENING, regionLocation=87c3fdb6c570,37609,1733175009443 2024-12-02T21:30:10,773 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5de0817ca3d807b8d3f50574951d9f2a, ASSIGN because future has completed 2024-12-02T21:30:10,775 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5de0817ca3d807b8d3f50574951d9f2a, server=87c3fdb6c570,37609,1733175009443}] 2024-12-02T21:30:10,933 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 
2024-12-02T21:30:10,933 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5de0817ca3d807b8d3f50574951d9f2a, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:30:10,933 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:30:10,933 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:30:10,933 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:30:10,933 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:30:10,935 INFO [StoreOpener-5de0817ca3d807b8d3f50574951d9f2a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:30:10,936 INFO [StoreOpener-5de0817ca3d807b8d3f50574951d9f2a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5de0817ca3d807b8d3f50574951d9f2a columnFamilyName info 2024-12-02T21:30:10,936 DEBUG [StoreOpener-5de0817ca3d807b8d3f50574951d9f2a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:30:10,937 INFO [StoreOpener-5de0817ca3d807b8d3f50574951d9f2a-1 {}] regionserver.HStore(327): Store=5de0817ca3d807b8d3f50574951d9f2a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:30:10,937 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:30:10,937 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:30:10,938 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:30:10,938 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:30:10,938 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:30:10,940 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:30:10,942 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:30:10,943 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5de0817ca3d807b8d3f50574951d9f2a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741618, jitterRate=-0.05698476731777191}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:30:10,943 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:30:10,943 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5de0817ca3d807b8d3f50574951d9f2a: Running coprocessor pre-open hook at 1733175010933Writing region info on filesystem at 1733175010933Initializing all the Stores at 1733175010934 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175010934Cleaning up temporary data from old regions at 1733175010938 (+4 ms)Running coprocessor post-open hooks at 1733175010943 (+5 ms)Region opened successfully at 1733175010943 2024-12-02T21:30:10,944 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a., pid=6, masterSystemTime=1733175010928 2024-12-02T21:30:10,947 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 2024-12-02T21:30:10,947 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 2024-12-02T21:30:10,948 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5de0817ca3d807b8d3f50574951d9f2a, regionState=OPEN, openSeqNum=2, regionLocation=87c3fdb6c570,37609,1733175009443 2024-12-02T21:30:10,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5de0817ca3d807b8d3f50574951d9f2a, server=87c3fdb6c570,37609,1733175009443 because future has completed 2024-12-02T21:30:10,953 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T21:30:10,953 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5de0817ca3d807b8d3f50574951d9f2a, server=87c3fdb6c570,37609,1733175009443 in 177 msec 2024-12-02T21:30:10,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T21:30:10,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5de0817ca3d807b8d3f50574951d9f2a, ASSIGN in 341 msec 2024-12-02T21:30:10,956 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:30:10,956 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733175010956"}]},"ts":"1733175010956"} 2024-12-02T21:30:10,959 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-02T21:30:10,960 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:30:10,962 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 377 msec 2024-12-02T21:30:11,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:11,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:11,574 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:30:11,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,595 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,595 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,596 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:11,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:30:12,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:12,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:13,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:13,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:14,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:14,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:15,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:15,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:15,914 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T21:30:15,916 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-02T21:30:16,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T21:30:16,071 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T21:30:16,073 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:30:16,073 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T21:30:16,074 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T21:30:16,074 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T21:30:16,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:30:16,075 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-02T21:30:16,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:16,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:17,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:17,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:18,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:18,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:19,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:19,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:20,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:20,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-02T21:30:20,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-02T21:30:20,654 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-02T21:30:20,654 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-12-02T21:30:20,662 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T21:30:20,662 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.
2024-12-02T21:30:20,666 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a., hostname=87c3fdb6c570,37609,1733175009443, seqNum=2]
2024-12-02T21:30:20,672 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T21:30:20,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T21:30:20,678 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-02T21:30:20,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-02T21:30:20,679 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-02T21:30:20,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-02T21:30:20,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37609 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-12-02T21:30:20,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.
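[Editor's note: the records above show the test client asking the master to flush the table; the master stores a FlushTableProcedure (pid=7), fans out a FlushRegionProcedure (pid=8), and the client polls "Checking to see if procedure is done". A minimal client-side sketch of triggering such a flush through the public HBase Admin API follows; it is illustrative only, assumes a reachable cluster configured via hbase-site.xml, and is not the test's actual code.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Table name taken from the records above.
        TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Admin.flush submits the flush to the master and waits for the
            // resulting procedure (and its per-region children) to complete,
            // which is the sequence the procedure-executor records above trace.
            admin.flush(table);
        }
    }
}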
2024-12-02T21:30:20,847 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 5de0817ca3d807b8d3f50574951d9f2a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-02T21:30:20,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/6e5bb1583e404c4dad7b832b41962d96 is 1080, key is row0001/info:/1733175020667/Put/seqid=0
2024-12-02T21:30:20,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741837_1013 (size=6033)
2024-12-02T21:30:20,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741837_1013 (size=6033)
2024-12-02T21:30:20,870 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/6e5bb1583e404c4dad7b832b41962d96
2024-12-02T21:30:20,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/6e5bb1583e404c4dad7b832b41962d96 as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/6e5bb1583e404c4dad7b832b41962d96
2024-12-02T21:30:20,883 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/6e5bb1583e404c4dad7b832b41962d96, entries=1, sequenceid=5, filesize=5.9 K
2024-12-02T21:30:20,884 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5de0817ca3d807b8d3f50574951d9f2a in 37ms, sequenceid=5, compaction requested=false
2024-12-02T21:30:20,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 5de0817ca3d807b8d3f50574951d9f2a:
2024-12-02T21:30:20,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.
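[Editor's note: the flush above writes the new HFile under the region's .tmp directory and only then commits it into the info store directory ("Committing ... as ..."), so readers never observe a partially written file. A minimal sketch of that write-then-rename pattern using the plain Hadoop FileSystem API follows; the paths are hypothetical and this is not the HRegionFileSystem implementation itself.]

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Default filesystem from the configuration, e.g. an HDFS instance
        // like hdfs://localhost:32819 in the records above.
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical region layout: write the file under .tmp first...
        Path tmp = new Path("/demo/region/.tmp/info/flushfile");
        Path committed = new Path("/demo/region/info/flushfile");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("flushed cells".getBytes(StandardCharsets.UTF_8));
        }

        // ...then move it into the store directory in a single rename, the
        // same commit step the "Committing ... as ..." record describes.
        fs.mkdirs(committed.getParent());
        if (!fs.rename(tmp, committed)) {
            throw new IOException("commit failed for " + committed);
        }
    }
}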
2024-12-02T21:30:20,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-02T21:30:20,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-02T21:30:20,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-02T21:30:20,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec 2024-12-02T21:30:20,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 220 msec 2024-12-02T21:30:21,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:21,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:22,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:22,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:23,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:23,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:24,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:24,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:25,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:25,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:26,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:26,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:27,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:27,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:28,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:28,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:29,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:29,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:30,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:30,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:30,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 after 68077ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:30:30,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta after 68059ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:30:30,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T21:30:30,712 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-02T21:30:30,718 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:30:30,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:30:30,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-02T21:30:30,723 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-02T21:30:30,726 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T21:30:30,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T21:30:30,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37609 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-02T21:30:30,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 
2024-12-02T21:30:30,885 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 5de0817ca3d807b8d3f50574951d9f2a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-02T21:30:30,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/8237782d4dbf497ab1fa8a1d35f96334 is 1080, key is row0002/info:/1733175030714/Put/seqid=0 2024-12-02T21:30:30,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741838_1014 (size=6033) 2024-12-02T21:30:30,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741838_1014 (size=6033) 2024-12-02T21:30:30,901 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/8237782d4dbf497ab1fa8a1d35f96334 2024-12-02T21:30:30,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/8237782d4dbf497ab1fa8a1d35f96334 as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/8237782d4dbf497ab1fa8a1d35f96334 2024-12-02T21:30:30,914 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/8237782d4dbf497ab1fa8a1d35f96334, entries=1, sequenceid=9, filesize=5.9 K 2024-12-02T21:30:30,915 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5de0817ca3d807b8d3f50574951d9f2a in 30ms, sequenceid=9, compaction requested=false 2024-12-02T21:30:30,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 5de0817ca3d807b8d3f50574951d9f2a: 2024-12-02T21:30:30,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 
2024-12-02T21:30:30,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-12-02T21:30:30,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-12-02T21:30:30,919 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-12-02T21:30:30,919 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec
2024-12-02T21:30:30,921 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 202 msec
2024-12-02T21:30:31,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
(the identical InvocationTargetException / "Filesystem closed" stack trace accompanied each of the lease-recovery retry warnings below)
2024-12-02T21:30:31,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
2024-12-02T21:30:32,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
2024-12-02T21:30:32,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
2024-12-02T21:30:33,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
2024-12-02T21:30:33,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
2024-12-02T21:30:34,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
2024-12-02T21:30:34,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
2024-12-02T21:30:35,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
2024-12-02T21:30:35,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
2024-12-02T21:30:36,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
2024-12-02T21:30:36,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
2024-12-02T21:30:37,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
2024-12-02T21:30:37,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
2024-12-02T21:30:38,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
2024-12-02T21:30:38,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
2024-12-02T21:30:39,268 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-02T21:30:39,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
2024-12-02T21:30:39,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
2024-12-02T21:30:40,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
2024-12-02T21:30:40,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
2024-12-02T21:30:40,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-02T21:30:40,753 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-02T21:30:40,761 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C37609%2C1733175009443.1733175040761
2024-12-02T21:30:40,770 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:30:40,770 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:30:40,770 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:30:40,770 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:30:40,770 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:30:40,770 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.1733175010050 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.1733175040761
2024-12-02T21:30:40,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741833_1009 (size=5546)
2024-12-02T21:30:40,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741833_1009 (size=5546)
2024-12-02T21:30:40,776 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41129:41129),(127.0.0.1/127.0.0.1:34197:34197)]
2024-12-02T21:30:40,777 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T21:30:40,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T21:30:40,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-02T21:30:40,818 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-02T21:30:40,819 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-02T21:30:40,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-02T21:30:40,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37609 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-02T21:30:40,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.
2024-12-02T21:30:40,975 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 5de0817ca3d807b8d3f50574951d9f2a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-02T21:30:40,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/1da1df30cfa4452ca45d84816c2b4798 is 1080, key is row0003/info:/1733175040757/Put/seqid=0
2024-12-02T21:30:40,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741840_1016 (size=6033)
2024-12-02T21:30:40,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741840_1016 (size=6033)
2024-12-02T21:30:40,988 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/1da1df30cfa4452ca45d84816c2b4798
2024-12-02T21:30:40,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/1da1df30cfa4452ca45d84816c2b4798 as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/1da1df30cfa4452ca45d84816c2b4798
2024-12-02T21:30:41,003 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/1da1df30cfa4452ca45d84816c2b4798, entries=1, sequenceid=13, filesize=5.9 K
2024-12-02T21:30:41,004 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5de0817ca3d807b8d3f50574951d9f2a in 29ms, sequenceid=13, compaction requested=true
2024-12-02T21:30:41,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 5de0817ca3d807b8d3f50574951d9f2a:
2024-12-02T21:30:41,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.
2024-12-02T21:30:41,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-12-02T21:30:41,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-12-02T21:30:41,008 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-12-02T21:30:41,008 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec
2024-12-02T21:30:41,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 232 msec
2024-12-02T21:30:41,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:41,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:42,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:42,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:30:50,601 INFO [master/87c3fdb6c570:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-02T21:30:50,601 INFO [master/87c3fdb6c570:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-02T21:30:50,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-02T21:30:50,793 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-02T21:30:50,793 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-02T21:30:50,797 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-02T21:30:50,797 DEBUG [Time-limited test {}] regionserver.HStore(1541): 5de0817ca3d807b8d3f50574951d9f2a/info is initiating minor compaction (all files)
2024-12-02T21:30:50,797 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-02T21:30:50,798 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-02T21:30:50,798 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 5de0817ca3d807b8d3f50574951d9f2a/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.
2024-12-02T21:30:50,798 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/6e5bb1583e404c4dad7b832b41962d96, hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/8237782d4dbf497ab1fa8a1d35f96334, hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/1da1df30cfa4452ca45d84816c2b4798] into tmpdir=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp, totalSize=17.7 K
2024-12-02T21:30:50,800 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 6e5bb1583e404c4dad7b832b41962d96, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733175020667
2024-12-02T21:30:50,801 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 8237782d4dbf497ab1fa8a1d35f96334, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733175030714
2024-12-02T21:30:50,803 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1da1df30cfa4452ca45d84816c2b4798, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733175040757
2024-12-02T21:30:50,815 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 5de0817ca3d807b8d3f50574951d9f2a#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-02T21:30:50,816 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/bcc6d2dc930846ca952f8399b095347a is 1080, key is row0001/info:/1733175020667/Put/seqid=0
2024-12-02T21:30:50,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741841_1017 (size=8296)
2024-12-02T21:30:50,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741841_1017 (size=8296)
2024-12-02T21:30:50,827 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/bcc6d2dc930846ca952f8399b095347a as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/bcc6d2dc930846ca952f8399b095347a
2024-12-02T21:30:50,833 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5de0817ca3d807b8d3f50574951d9f2a/info of 5de0817ca3d807b8d3f50574951d9f2a into bcc6d2dc930846ca952f8399b095347a(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-02T21:30:50,833 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 5de0817ca3d807b8d3f50574951d9f2a:
2024-12-02T21:30:50,835 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C37609%2C1733175009443.1733175050835
2024-12-02T21:30:50,841 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:30:50,841 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:30:50,842 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:30:50,842 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:30:50,842 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T21:30:50,842 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.1733175040761 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.1733175050835
2024-12-02T21:30:50,843 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34197:34197),(127.0.0.1/127.0.0.1:41129:41129)]
2024-12-02T21:30:50,843 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.1733175040761 is not closed yet, will try archiving it next time
2024-12-02T21:30:50,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741839_1015 (size=2520)
2024-12-02T21:30:50,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741839_1015 (size=2520)
2024-12-02T21:30:50,848 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.1733175010050 to hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/oldWALs/87c3fdb6c570%2C37609%2C1733175009443.1733175010050
2024-12-02T21:30:50,849 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T21:30:50,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T21:30:50,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-02T21:30:50,851 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-02T21:30:50,851 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-02T21:30:50,852 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-02T21:30:51,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37609 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-12-02T21:30:51,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.
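The flush request just above arrives at the master (HMaster$22 flush) and runs as FlushTableProcedure pid=13 with a FlushRegionProcedure child, while the minor compaction a few lines earlier is driven directly by the test thread. For orientation, the hypothetical client-side sketch below issues the same kinds of requests through the synchronous Admin API; the test itself goes through the async admin (see the RawAsyncHBaseAdmin line above), and the table and server names here are simply copied from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Hypothetical driver: flush the test table, queue a compaction, and roll the WAL
    // of the region server named in the log. Names are assumptions taken from the log.
    public final class FlushAndRollSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table =
              TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
          admin.flush(table);   // synchronous flush, backed by FlushTableProcedure on the master
          admin.compact(table); // request a (minor) compaction of the table's stores
          // Roll the WAL on one region server; "host,port,startcode" as printed in the log.
          admin.rollWALWriter(ServerName.valueOf("87c3fdb6c570,37609,1733175009443"));
        }
      }
    }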
2024-12-02T21:30:51,005 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 5de0817ca3d807b8d3f50574951d9f2a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-02T21:30:51,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/9dbf6a6e7fae467da5d320d43e2a408d is 1080, key is row0000/info:/1733175050834/Put/seqid=0
2024-12-02T21:30:51,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741843_1019 (size=6033)
2024-12-02T21:30:51,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741843_1019 (size=6033)
2024-12-02T21:30:51,142 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/9dbf6a6e7fae467da5d320d43e2a408d
2024-12-02T21:30:51,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/9dbf6a6e7fae467da5d320d43e2a408d as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/9dbf6a6e7fae467da5d320d43e2a408d
2024-12-02T21:30:51,157 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/9dbf6a6e7fae467da5d320d43e2a408d, entries=1, sequenceid=18, filesize=5.9 K
2024-12-02T21:30:51,159 INFO [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5de0817ca3d807b8d3f50574951d9f2a in 154ms, sequenceid=18, compaction requested=false
2024-12-02T21:30:51,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 5de0817ca3d807b8d3f50574951d9f2a:
2024-12-02T21:30:51,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.
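After the flush above commits 9dbf6a6e7fae467da5d320d43e2a408d into the 'info' family, the flushed row (row0000, written with what appears in the cell key to be an empty qualifier) should be readable through an ordinary Get, whether it is served from the memstore or from the new HFile. A hypothetical read-back check, with the row, family and qualifier taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical verification that the flushed cell is visible to a reader.
    public final class ReadBackSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(
                 TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))) {
          Result r = table.get(new Get(Bytes.toBytes("row0000")));
          // The cell key in the log ("row0000/info:/...") suggests an empty qualifier;
          // this is an assumption about the test data, not something the log states outright.
          byte[] value = r.getValue(Bytes.toBytes("info"), Bytes.toBytes(""));
          System.out.println("row0000 found: " + !r.isEmpty()
              + ", value length: " + (value == null ? 0 : value.length));
        }
      }
    }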
2024-12-02T21:30:51,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-12-02T21:30:51,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-12-02T21:30:51,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-12-02T21:30:51,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 309 msec
2024-12-02T21:30:51,166 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 315 msec
11 more 2024-12-02T21:30:53,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:53,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:54,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:54,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:55,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:55,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:30:55,934 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5de0817ca3d807b8d3f50574951d9f2a, had cached 0 bytes from a total of 14329 2024-12-02T21:30:56,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:56,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:57,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:57,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:58,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:58,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:59,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:30:59,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:00,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:00,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:31:00,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-02T21:31:00,863 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-02T21:31:00,869 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C37609%2C1733175009443.1733175060869 2024-12-02T21:31:00,880 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:00,880 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:00,881 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:00,881 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:00,881 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:00,881 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.1733175050835 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.1733175060869 2024-12-02T21:31:00,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741842_1018 (size=2026) 2024-12-02T21:31:00,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741842_1018 (size=2026) 2024-12-02T21:31:00,883 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34197:34197),(127.0.0.1/127.0.0.1:41129:41129)] 2024-12-02T21:31:00,883 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.1733175050835 is not closed yet, will try archiving it next time 2024-12-02T21:31:00,883 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/WALs/87c3fdb6c570,37609,1733175009443/87c3fdb6c570%2C37609%2C1733175009443.1733175040761 to hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/oldWALs/87c3fdb6c570%2C37609%2C1733175009443.1733175040761 2024-12-02T21:31:00,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T21:31:00,883 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T21:31:00,884 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:31:00,884 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:00,884 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:00,884 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T21:31:00,884 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:31:00,884 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=309456352, stopped=false 2024-12-02T21:31:00,884 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87c3fdb6c570,41399,1733175009286 2024-12-02T21:31:00,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:31:00,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:31:00,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:00,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:00,949 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:31:00,949 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T21:31:00,949 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:31:00,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:00,949 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:31:00,949 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:31:00,950 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87c3fdb6c570,37609,1733175009443' ***** 2024-12-02T21:31:00,950 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T21:31:00,950 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:31:00,950 INFO [RS:0;87c3fdb6c570:37609 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:31:00,950 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T21:31:00,950 INFO [RS:0;87c3fdb6c570:37609 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:31:00,950 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(3091): Received CLOSE for 5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:31:00,950 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(959): stopping server 87c3fdb6c570,37609,1733175009443 2024-12-02T21:31:00,950 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:31:00,950 INFO [RS:0;87c3fdb6c570:37609 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87c3fdb6c570:37609. 
2024-12-02T21:31:00,950 DEBUG [RS:0;87c3fdb6c570:37609 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:31:00,950 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5de0817ca3d807b8d3f50574951d9f2a, disabling compactions & flushes 2024-12-02T21:31:00,950 DEBUG [RS:0;87c3fdb6c570:37609 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:00,950 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 2024-12-02T21:31:00,950 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:31:00,950 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 2024-12-02T21:31:00,950 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:31:00,950 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:31:00,950 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. after waiting 0 ms 2024-12-02T21:31:00,950 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T21:31:00,950 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 
2024-12-02T21:31:00,951 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 5de0817ca3d807b8d3f50574951d9f2a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-02T21:31:00,951 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T21:31:00,951 DEBUG [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(1325): Online Regions={5de0817ca3d807b8d3f50574951d9f2a=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a., 1588230740=hbase:meta,,1.1588230740} 2024-12-02T21:31:00,951 DEBUG [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5de0817ca3d807b8d3f50574951d9f2a 2024-12-02T21:31:00,951 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:31:00,951 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:31:00,951 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:31:00,951 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:31:00,951 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:31:00,951 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-02T21:31:00,954 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/1b426ab987d64668acde4ac17023edda is 1080, key is row0001/info:/1733175060866/Put/seqid=0 2024-12-02T21:31:00,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741845_1021 (size=6033) 2024-12-02T21:31:00,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741845_1021 (size=6033) 2024-12-02T21:31:00,960 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/1b426ab987d64668acde4ac17023edda 2024-12-02T21:31:00,967 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/.tmp/info/d590ee940ff04d8194cf01cc2fb744e3 is 227, key is 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a./info:regioninfo/1733175010947/Put/seqid=0 2024-12-02T21:31:00,967 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/.tmp/info/1b426ab987d64668acde4ac17023edda as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/1b426ab987d64668acde4ac17023edda 2024-12-02T21:31:00,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741846_1022 (size=7308) 2024-12-02T21:31:00,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741846_1022 (size=7308) 2024-12-02T21:31:00,972 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/.tmp/info/d590ee940ff04d8194cf01cc2fb744e3 2024-12-02T21:31:00,973 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/1b426ab987d64668acde4ac17023edda, entries=1, sequenceid=22, filesize=5.9 K 2024-12-02T21:31:00,974 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5de0817ca3d807b8d3f50574951d9f2a in 23ms, sequenceid=22, compaction requested=true 2024-12-02T21:31:00,974 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/6e5bb1583e404c4dad7b832b41962d96, hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/8237782d4dbf497ab1fa8a1d35f96334, hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/1da1df30cfa4452ca45d84816c2b4798] to archive 2024-12-02T21:31:00,975 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T21:31:00,976 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/6e5bb1583e404c4dad7b832b41962d96 to hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/6e5bb1583e404c4dad7b832b41962d96 2024-12-02T21:31:00,978 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/8237782d4dbf497ab1fa8a1d35f96334 to hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/8237782d4dbf497ab1fa8a1d35f96334 2024-12-02T21:31:00,979 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/1da1df30cfa4452ca45d84816c2b4798 to hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/info/1da1df30cfa4452ca45d84816c2b4798 2024-12-02T21:31:00,979 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=87c3fdb6c570:41399 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-02T21:31:00,979 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6e5bb1583e404c4dad7b832b41962d96=6033, 8237782d4dbf497ab1fa8a1d35f96334=6033, 1da1df30cfa4452ca45d84816c2b4798=6033] 2024-12-02T21:31:00,983 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5de0817ca3d807b8d3f50574951d9f2a/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-02T21:31:00,984 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 2024-12-02T21:31:00,984 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5de0817ca3d807b8d3f50574951d9f2a: Waiting for close lock at 1733175060950Running coprocessor pre-close hooks at 1733175060950Disabling compacts and flushes for region at 1733175060950Disabling writes for close at 1733175060950Obtaining lock to block concurrent updates at 1733175060951 (+1 ms)Preparing flush snapshotting stores in 5de0817ca3d807b8d3f50574951d9f2a at 1733175060951Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733175060951Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. at 1733175060951Flushing 5de0817ca3d807b8d3f50574951d9f2a/info: creating writer at 1733175060951Flushing 5de0817ca3d807b8d3f50574951d9f2a/info: appending metadata at 1733175060954 (+3 ms)Flushing 5de0817ca3d807b8d3f50574951d9f2a/info: closing flushed file at 1733175060954Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@745cbcab: reopening flushed file at 1733175060966 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5de0817ca3d807b8d3f50574951d9f2a in 23ms, sequenceid=22, compaction requested=true at 1733175060974 (+8 ms)Writing region close event to WAL at 1733175060980 (+6 ms)Running coprocessor post-close hooks at 1733175060984 (+4 ms)Closed at 1733175060984 2024-12-02T21:31:00,984 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733175010582.5de0817ca3d807b8d3f50574951d9f2a. 
2024-12-02T21:31:00,990 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/.tmp/ns/32ff664207514695bb638230d4a4c890 is 43, key is default/ns:d/1733175010530/Put/seqid=0 2024-12-02T21:31:00,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741847_1023 (size=5153) 2024-12-02T21:31:00,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741847_1023 (size=5153) 2024-12-02T21:31:00,995 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/.tmp/ns/32ff664207514695bb638230d4a4c890 2024-12-02T21:31:01,014 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/.tmp/table/1742e106520c41019190246d33a8d09a is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733175010956/Put/seqid=0 2024-12-02T21:31:01,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741848_1024 (size=5508) 2024-12-02T21:31:01,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741848_1024 (size=5508) 2024-12-02T21:31:01,019 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/.tmp/table/1742e106520c41019190246d33a8d09a 2024-12-02T21:31:01,025 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/.tmp/info/d590ee940ff04d8194cf01cc2fb744e3 as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/info/d590ee940ff04d8194cf01cc2fb744e3 2024-12-02T21:31:01,029 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/info/d590ee940ff04d8194cf01cc2fb744e3, entries=10, sequenceid=11, filesize=7.1 K 2024-12-02T21:31:01,030 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/.tmp/ns/32ff664207514695bb638230d4a4c890 as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/ns/32ff664207514695bb638230d4a4c890 2024-12-02T21:31:01,035 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/ns/32ff664207514695bb638230d4a4c890, entries=2, sequenceid=11, filesize=5.0 K 2024-12-02T21:31:01,036 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/.tmp/table/1742e106520c41019190246d33a8d09a as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/table/1742e106520c41019190246d33a8d09a 2024-12-02T21:31:01,041 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/table/1742e106520c41019190246d33a8d09a, entries=2, sequenceid=11, filesize=5.4 K 2024-12-02T21:31:01,042 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 91ms, sequenceid=11, compaction requested=false 2024-12-02T21:31:01,046 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-02T21:31:01,046 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:31:01,046 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:31:01,046 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733175060951Running coprocessor pre-close hooks at 1733175060951Disabling compacts and flushes for region at 1733175060951Disabling writes for close at 1733175060951Obtaining lock to block concurrent updates at 1733175060951Preparing flush snapshotting stores in 1588230740 at 1733175060951Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733175060951Flushing stores of hbase:meta,,1.1588230740 at 1733175060952 (+1 ms)Flushing 1588230740/info: creating writer at 1733175060952Flushing 1588230740/info: appending metadata at 1733175060966 (+14 ms)Flushing 1588230740/info: closing flushed file at 1733175060966Flushing 1588230740/ns: creating writer at 1733175060976 (+10 ms)Flushing 1588230740/ns: appending metadata at 1733175060990 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733175060990Flushing 1588230740/table: creating writer at 1733175061000 (+10 ms)Flushing 1588230740/table: appending metadata at 1733175061014 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733175061014Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36661cba: reopening flushed file at 1733175061024 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23eebe6c: reopening flushed file at 1733175061029 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19b2152f: reopening flushed file at 1733175061035 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 91ms, sequenceid=11, compaction requested=false at 1733175061042 (+7 ms)Writing region close event to WAL at 1733175061043 (+1 ms)Running coprocessor post-close hooks at 1733175061046 (+3 ms)Closed at 1733175061046 2024-12-02T21:31:01,046 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T21:31:01,151 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(976): stopping server 87c3fdb6c570,37609,1733175009443; all regions closed. 2024-12-02T21:31:01,152 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,152 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,153 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,153 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,153 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741834_1010 (size=3306) 2024-12-02T21:31:01,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741834_1010 (size=3306) 2024-12-02T21:31:01,163 DEBUG [RS:0;87c3fdb6c570:37609 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/oldWALs 2024-12-02T21:31:01,163 INFO [RS:0;87c3fdb6c570:37609 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C37609%2C1733175009443.meta:.meta(num 1733175010450) 2024-12-02T21:31:01,164 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,164 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,164 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,164 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,164 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741844_1020 (size=1252) 2024-12-02T21:31:01,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741844_1020 (size=1252) 2024-12-02T21:31:01,171 DEBUG [RS:0;87c3fdb6c570:37609 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/oldWALs 2024-12-02T21:31:01,171 INFO [RS:0;87c3fdb6c570:37609 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C37609%2C1733175009443:(num 1733175060869) 2024-12-02T21:31:01,171 DEBUG [RS:0;87c3fdb6c570:37609 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:01,171 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:31:01,171 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:31:01,171 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.ChoreService(370): Chore service for: regionserver/87c3fdb6c570:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T21:31:01,171 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:31:01,172 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T21:31:01,172 INFO [RS:0;87c3fdb6c570:37609 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37609 2024-12-02T21:31:01,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:31:01,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87c3fdb6c570,37609,1733175009443 2024-12-02T21:31:01,180 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:31:01,191 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87c3fdb6c570,37609,1733175009443] 2024-12-02T21:31:01,201 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87c3fdb6c570,37609,1733175009443 already deleted, retry=false 2024-12-02T21:31:01,201 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87c3fdb6c570,37609,1733175009443 expired; onlineServers=0 2024-12-02T21:31:01,201 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87c3fdb6c570,41399,1733175009286' ***** 2024-12-02T21:31:01,201 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T21:31:01,201 INFO [M:0;87c3fdb6c570:41399 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:31:01,202 INFO [M:0;87c3fdb6c570:41399 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:31:01,202 DEBUG [M:0;87c3fdb6c570:41399 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T21:31:01,202 DEBUG [M:0;87c3fdb6c570:41399 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T21:31:01,202 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T21:31:01,202 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733175009778 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733175009778,5,FailOnTimeoutGroup] 2024-12-02T21:31:01,202 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733175009778 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733175009778,5,FailOnTimeoutGroup] 2024-12-02T21:31:01,202 INFO [M:0;87c3fdb6c570:41399 {}] hbase.ChoreService(370): Chore service for: master/87c3fdb6c570:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T21:31:01,203 INFO [M:0;87c3fdb6c570:41399 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:31:01,203 DEBUG [M:0;87c3fdb6c570:41399 {}] master.HMaster(1795): Stopping service threads 2024-12-02T21:31:01,203 INFO [M:0;87c3fdb6c570:41399 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T21:31:01,203 INFO [M:0;87c3fdb6c570:41399 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:31:01,204 INFO [M:0;87c3fdb6c570:41399 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T21:31:01,204 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T21:31:01,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T21:31:01,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:01,212 DEBUG [M:0;87c3fdb6c570:41399 {}] zookeeper.ZKUtil(347): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T21:31:01,212 WARN [M:0;87c3fdb6c570:41399 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T21:31:01,213 INFO [M:0;87c3fdb6c570:41399 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/.lastflushedseqids 2024-12-02T21:31:01,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741849_1025 (size=130) 2024-12-02T21:31:01,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741849_1025 (size=130) 2024-12-02T21:31:01,218 INFO [M:0;87c3fdb6c570:41399 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T21:31:01,218 INFO [M:0;87c3fdb6c570:41399 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T21:31:01,218 DEBUG [M:0;87c3fdb6c570:41399 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:31:01,218 INFO [M:0;87c3fdb6c570:41399 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:01,218 DEBUG [M:0;87c3fdb6c570:41399 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:01,218 DEBUG [M:0;87c3fdb6c570:41399 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:31:01,218 DEBUG [M:0;87c3fdb6c570:41399 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:01,219 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-12-02T21:31:01,237 DEBUG [M:0;87c3fdb6c570:41399 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/49518f7812a742b8b08bc378209ee3ca is 82, key is hbase:meta,,1/info:regioninfo/1733175010479/Put/seqid=0 2024-12-02T21:31:01,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741850_1026 (size=5672) 2024-12-02T21:31:01,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741850_1026 (size=5672) 2024-12-02T21:31:01,242 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/49518f7812a742b8b08bc378209ee3ca 2024-12-02T21:31:01,260 DEBUG [M:0;87c3fdb6c570:41399 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/46b23a1e054f46469a69ac3ad778baef is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733175010961/Put/seqid=0 2024-12-02T21:31:01,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741851_1027 (size=7823) 2024-12-02T21:31:01,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741851_1027 (size=7823) 2024-12-02T21:31:01,265 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/46b23a1e054f46469a69ac3ad778baef 2024-12-02T21:31:01,269 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 46b23a1e054f46469a69ac3ad778baef 2024-12-02T21:31:01,288 DEBUG [M:0;87c3fdb6c570:41399 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fc815b2cbc174c8288620d702e26e11e is 69, key is 87c3fdb6c570,37609,1733175009443/rs:state/1733175009903/Put/seqid=0 2024-12-02T21:31:01,291 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:01,291 INFO [RS:0;87c3fdb6c570:37609 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:31:01,291 INFO [RS:0;87c3fdb6c570:37609 {}] regionserver.HRegionServer(1031): Exiting; stopping=87c3fdb6c570,37609,1733175009443; zookeeper connection closed. 2024-12-02T21:31:01,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37609-0x10197f3dba20001, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:01,291 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1b07beca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1b07beca 2024-12-02T21:31:01,291 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T21:31:01,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741852_1028 (size=5156) 2024-12-02T21:31:01,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741852_1028 (size=5156) 2024-12-02T21:31:01,292 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fc815b2cbc174c8288620d702e26e11e 2024-12-02T21:31:01,309 DEBUG [M:0;87c3fdb6c570:41399 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7727842d83d24228bec8147b11a25d8a is 52, key is load_balancer_on/state:d/1733175010579/Put/seqid=0 2024-12-02T21:31:01,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741853_1029 (size=5056) 2024-12-02T21:31:01,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741853_1029 (size=5056) 2024-12-02T21:31:01,314 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7727842d83d24228bec8147b11a25d8a 2024-12-02T21:31:01,320 DEBUG [M:0;87c3fdb6c570:41399 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/49518f7812a742b8b08bc378209ee3ca as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/49518f7812a742b8b08bc378209ee3ca 2024-12-02T21:31:01,325 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/49518f7812a742b8b08bc378209ee3ca, entries=8, sequenceid=121, filesize=5.5 K 2024-12-02T21:31:01,326 DEBUG [M:0;87c3fdb6c570:41399 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/46b23a1e054f46469a69ac3ad778baef as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/46b23a1e054f46469a69ac3ad778baef 2024-12-02T21:31:01,331 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 46b23a1e054f46469a69ac3ad778baef 2024-12-02T21:31:01,332 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/46b23a1e054f46469a69ac3ad778baef, entries=14, sequenceid=121, filesize=7.6 K 2024-12-02T21:31:01,333 DEBUG [M:0;87c3fdb6c570:41399 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fc815b2cbc174c8288620d702e26e11e as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fc815b2cbc174c8288620d702e26e11e 2024-12-02T21:31:01,338 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fc815b2cbc174c8288620d702e26e11e, entries=1, sequenceid=121, filesize=5.0 K 2024-12-02T21:31:01,339 DEBUG [M:0;87c3fdb6c570:41399 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7727842d83d24228bec8147b11a25d8a as hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7727842d83d24228bec8147b11a25d8a 2024-12-02T21:31:01,345 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32819/user/jenkins/test-data/e1f1deb3-b218-0fb3-1416-626f300b8f79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7727842d83d24228bec8147b11a25d8a, entries=1, sequenceid=121, filesize=4.9 K 2024-12-02T21:31:01,346 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44641, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=121, compaction requested=false 2024-12-02T21:31:01,349 INFO [M:0;87c3fdb6c570:41399 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:31:01,349 DEBUG [M:0;87c3fdb6c570:41399 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733175061218Disabling compacts and flushes for region at 1733175061218Disabling writes for close at 1733175061218Obtaining lock to block concurrent updates at 1733175061219 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733175061219Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44641, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1733175061219Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733175061220 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733175061220Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733175061237 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733175061237Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733175061247 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733175061260 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733175061260Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733175061270 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733175061287 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733175061287Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733175061297 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733175061309 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733175061309Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1866b494: reopening flushed file at 1733175061319 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31eef22d: reopening flushed file at 1733175061325 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66823ab3: reopening flushed file at 1733175061332 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1453af60: reopening flushed file at 1733175061338 (+6 ms)Finished flush of dataSize ~43.59 KB/44641, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=121, compaction requested=false at 1733175061346 (+8 ms)Writing region close event to WAL at 1733175061349 (+3 ms)Closed at 1733175061349 2024-12-02T21:31:01,349 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,350 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,350 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,350 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,350 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:01,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43761 is added to blk_1073741830_1006 (size=53038) 2024-12-02T21:31:01,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38657 is added to blk_1073741830_1006 (size=53038) 2024-12-02T21:31:01,353 INFO [M:0;87c3fdb6c570:41399 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-02T21:31:01,353 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T21:31:01,353 INFO [M:0;87c3fdb6c570:41399 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41399 2024-12-02T21:31:01,353 INFO [M:0;87c3fdb6c570:41399 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:31:01,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:01,459 INFO [M:0;87c3fdb6c570:41399 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:31:01,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41399-0x10197f3dba20000, quorum=127.0.0.1:62166, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:01,461 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2526c219{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:31:01,462 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74e6f5d9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:31:01,462 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:31:01,462 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bd1d692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:31:01,462 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10c2896a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/hadoop.log.dir/,STOPPED} 2024-12-02T21:31:01,463 WARN [BP-1919574500-172.17.0.3-1733175007122 heartbeating to localhost/127.0.0.1:32819 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:31:01,463 WARN [BP-1919574500-172.17.0.3-1733175007122 heartbeating to localhost/127.0.0.1:32819 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1919574500-172.17.0.3-1733175007122 (Datanode Uuid 5ead784c-8339-44f4-97e4-2e9f05bc7950) service to localhost/127.0.0.1:32819 2024-12-02T21:31:01,463 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:31:01,463 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:31:01,464 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9/data/data3/current/BP-1919574500-172.17.0.3-1733175007122 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:01,464 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9/data/data4/current/BP-1919574500-172.17.0.3-1733175007122 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:01,464 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:31:01,466 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43d16ee8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:31:01,466 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b135886{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:31:01,466 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:31:01,466 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@439cbd97{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:31:01,466 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cfed7b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/hadoop.log.dir/,STOPPED} 2024-12-02T21:31:01,468 WARN [BP-1919574500-172.17.0.3-1733175007122 heartbeating to localhost/127.0.0.1:32819 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:31:01,468 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:31:01,468 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:31:01,468 WARN [BP-1919574500-172.17.0.3-1733175007122 heartbeating to localhost/127.0.0.1:32819 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1919574500-172.17.0.3-1733175007122 (Datanode Uuid daf7bd07-636b-4e68-ad9b-a40179411800) service to localhost/127.0.0.1:32819 2024-12-02T21:31:01,469 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9/data/data1/current/BP-1919574500-172.17.0.3-1733175007122 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:01,469 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/cluster_87b6a7a5-edf3-4897-2dd8-e3b2b5174ef9/data/data2/current/BP-1919574500-172.17.0.3-1733175007122 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:01,469 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:31:01,474 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d483d07{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:31:01,475 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1106c0e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:31:01,475 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:31:01,475 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ac76b28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:31:01,475 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@293e66d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/hadoop.log.dir/,STOPPED} 2024-12-02T21:31:01,480 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:31:01,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T21:31:01,506 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 182) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:32819 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32819 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32819 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:32819 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32819 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32819 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32819 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:32819 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=55 (was 99), ProcessCount=11 (was 11), AvailableMemoryMB=6623 (was 6683) 2024-12-02T21:31:01,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:01,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:01,515 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=55, ProcessCount=11, AvailableMemoryMB=6622 2024-12-02T21:31:01,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/hadoop.log.dir so I do NOT create it in target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2eb8023b-3b47-7365-d440-63704f723d86/hadoop.tmp.dir so I do NOT create it in target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5, deleteOnExit=true 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/test.cache.data in system properties and HBase conf 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T21:31:01,516 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:31:01,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:31:01,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:31:01,528 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:31:01,861 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:31:01,864 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:31:01,865 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:31:01,865 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:31:01,865 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:31:01,866 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:31:01,866 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24350cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:31:01,866 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f8818bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:31:01,923 INFO [regionserver/87c3fdb6c570:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:31:01,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cf57c4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/java.io.tmpdir/jetty-localhost-33123-hadoop-hdfs-3_4_1-tests_jar-_-any-2885436846470171382/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:31:01,955 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a67ff9c{HTTP/1.1, (http/1.1)}{localhost:33123} 2024-12-02T21:31:01,955 INFO [Time-limited test {}] server.Server(415): Started @251947ms 2024-12-02T21:31:01,964 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:31:02,221 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:31:02,224 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:31:02,224 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:31:02,224 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:31:02,225 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:31:02,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c08109c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:31:02,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1724ca70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:31:02,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9530f47{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/java.io.tmpdir/jetty-localhost-34713-hadoop-hdfs-3_4_1-tests_jar-_-any-3323746146381551435/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:31:02,314 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59fd7f75{HTTP/1.1, (http/1.1)}{localhost:34713} 2024-12-02T21:31:02,314 INFO [Time-limited test {}] server.Server(415): Started @252306ms 2024-12-02T21:31:02,315 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:31:02,340 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:31:02,342 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:31:02,343 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:31:02,343 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:31:02,343 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:31:02,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@208716aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:31:02,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70aed17c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:31:02,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5eab25ce{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/java.io.tmpdir/jetty-localhost-33425-hadoop-hdfs-3_4_1-tests_jar-_-any-9260884325467518822/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:31:02,433 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5773e0ea{HTTP/1.1, (http/1.1)}{localhost:33425} 2024-12-02T21:31:02,433 INFO [Time-limited test {}] server.Server(415): Started @252426ms 2024-12-02T21:31:02,434 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:31:02,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:02,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:31:03,433 WARN [Thread-1980 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5/data/data2/current/BP-193301389-172.17.0.3-1733175061531/current, will proceed with Du for space computation calculation, 2024-12-02T21:31:03,433 WARN [Thread-1979 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5/data/data1/current/BP-193301389-172.17.0.3-1733175061531/current, will proceed with Du for space computation calculation, 2024-12-02T21:31:03,451 WARN [Thread-1943 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:31:03,453 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfc970e59de12a75 with lease ID 0xf9bd1b891f029b42: Processing first storage report for DS-bef1635e-7798-45c4-bdbf-f0f5e47d8505 from datanode DatanodeRegistration(127.0.0.1:37623, datanodeUuid=413c5099-86fd-43ba-b884-ff6f625b9fb5, infoPort=45971, infoSecurePort=0, ipcPort=39319, storageInfo=lv=-57;cid=testClusterID;nsid=799138926;c=1733175061531) 2024-12-02T21:31:03,453 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfc970e59de12a75 with lease ID 0xf9bd1b891f029b42: from storage DS-bef1635e-7798-45c4-bdbf-f0f5e47d8505 node DatanodeRegistration(127.0.0.1:37623, datanodeUuid=413c5099-86fd-43ba-b884-ff6f625b9fb5, infoPort=45971, infoSecurePort=0, ipcPort=39319, storageInfo=lv=-57;cid=testClusterID;nsid=799138926;c=1733175061531), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:31:03,453 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfc970e59de12a75 with lease ID 0xf9bd1b891f029b42: Processing first storage report for DS-3f99cf16-95a7-4dfe-9627-917a3a972081 from datanode DatanodeRegistration(127.0.0.1:37623, datanodeUuid=413c5099-86fd-43ba-b884-ff6f625b9fb5, infoPort=45971, infoSecurePort=0, ipcPort=39319, storageInfo=lv=-57;cid=testClusterID;nsid=799138926;c=1733175061531) 2024-12-02T21:31:03,453 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfc970e59de12a75 with lease ID 0xf9bd1b891f029b42: from storage DS-3f99cf16-95a7-4dfe-9627-917a3a972081 node DatanodeRegistration(127.0.0.1:37623, datanodeUuid=413c5099-86fd-43ba-b884-ff6f625b9fb5, infoPort=45971, infoSecurePort=0, ipcPort=39319, storageInfo=lv=-57;cid=testClusterID;nsid=799138926;c=1733175061531), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T21:31:03,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:03,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:03,558 WARN [Thread-1990 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5/data/data3/current/BP-193301389-172.17.0.3-1733175061531/current, will proceed with Du for space computation calculation, 2024-12-02T21:31:03,558 WARN [Thread-1991 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5/data/data4/current/BP-193301389-172.17.0.3-1733175061531/current, will proceed with Du for space computation calculation, 2024-12-02T21:31:03,579 WARN [Thread-1966 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:31:03,581 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1b4123f02449f993 with lease ID 0xf9bd1b891f029b43: Processing first storage report for DS-37138f85-e2dd-489f-a068-c19763a54f74 from datanode DatanodeRegistration(127.0.0.1:34571, datanodeUuid=f044438b-b183-4f4c-92e3-760fac304e1d, infoPort=37697, infoSecurePort=0, ipcPort=46133, storageInfo=lv=-57;cid=testClusterID;nsid=799138926;c=1733175061531) 2024-12-02T21:31:03,581 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1b4123f02449f993 with lease ID 0xf9bd1b891f029b43: from storage DS-37138f85-e2dd-489f-a068-c19763a54f74 node DatanodeRegistration(127.0.0.1:34571, datanodeUuid=f044438b-b183-4f4c-92e3-760fac304e1d, infoPort=37697, infoSecurePort=0, ipcPort=46133, storageInfo=lv=-57;cid=testClusterID;nsid=799138926;c=1733175061531), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:31:03,581 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1b4123f02449f993 with lease ID 0xf9bd1b891f029b43: Processing first storage report for DS-ac517843-5283-41eb-96a4-c71f83b69f90 from datanode DatanodeRegistration(127.0.0.1:34571, datanodeUuid=f044438b-b183-4f4c-92e3-760fac304e1d, infoPort=37697, infoSecurePort=0, ipcPort=46133, storageInfo=lv=-57;cid=testClusterID;nsid=799138926;c=1733175061531) 2024-12-02T21:31:03,581 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1b4123f02449f993 with lease ID 0xf9bd1b891f029b43: from storage DS-ac517843-5283-41eb-96a4-c71f83b69f90 node DatanodeRegistration(127.0.0.1:34571, datanodeUuid=f044438b-b183-4f4c-92e3-760fac304e1d, infoPort=37697, infoSecurePort=0, ipcPort=46133, storageInfo=lv=-57;cid=testClusterID;nsid=799138926;c=1733175061531), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:31:03,666 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6 2024-12-02T21:31:03,691 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5/zookeeper_0, clientPort=58323, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:31:03,692 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58323 2024-12-02T21:31:03,693 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:03,695 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:03,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:31:03,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:31:03,706 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a with version=8 2024-12-02T21:31:03,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/hbase-staging 2024-12-02T21:31:03,708 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:31:03,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:03,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:03,708 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:31:03,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:03,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:31:03,708 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T21:31:03,708 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:31:03,708 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37427 2024-12-02T21:31:03,709 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37427 connecting to ZooKeeper ensemble=127.0.0.1:58323 2024-12-02T21:31:03,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:374270x0, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:31:03,808 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37427-0x10197f4b0350000 connected 2024-12-02T21:31:03,901 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:03,904 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:03,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:31:03,908 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a, hbase.cluster.distributed=false 2024-12-02T21:31:03,911 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:31:03,911 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37427 2024-12-02T21:31:03,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37427 2024-12-02T21:31:03,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37427 2024-12-02T21:31:03,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37427 2024-12-02T21:31:03,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37427 2024-12-02T21:31:03,928 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:31:03,928 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:03,928 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:03,928 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:31:03,928 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:03,928 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:31:03,928 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:31:03,928 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:31:03,929 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34593 2024-12-02T21:31:03,929 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34593 connecting to ZooKeeper ensemble=127.0.0.1:58323 2024-12-02T21:31:03,930 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:03,931 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:03,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:345930x0, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:31:03,942 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34593-0x10197f4b0350001 connected 2024-12-02T21:31:03,942 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:31:03,943 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:31:03,943 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:31:03,944 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:31:03,944 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:31:03,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34593 2024-12-02T21:31:03,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34593 2024-12-02T21:31:03,945 DEBUG [Time-limited 
test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34593 2024-12-02T21:31:03,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34593 2024-12-02T21:31:03,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34593 2024-12-02T21:31:03,957 DEBUG [M:0;87c3fdb6c570:37427 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87c3fdb6c570:37427 2024-12-02T21:31:03,957 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87c3fdb6c570,37427,1733175063707 2024-12-02T21:31:03,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:31:03,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:31:03,969 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87c3fdb6c570,37427,1733175063707 2024-12-02T21:31:03,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:31:03,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:03,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:03,980 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:31:03,980 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87c3fdb6c570,37427,1733175063707 from backup master directory 2024-12-02T21:31:03,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87c3fdb6c570,37427,1733175063707 2024-12-02T21:31:03,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:31:03,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:31:03,990 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T21:31:03,990 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87c3fdb6c570,37427,1733175063707 2024-12-02T21:31:03,995 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/hbase.id] with ID: eca96e6e-06ac-4a8e-85de-374de2cc4065 2024-12-02T21:31:03,995 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/.tmp/hbase.id 2024-12-02T21:31:04,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:31:04,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:31:04,001 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/.tmp/hbase.id]:[hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/hbase.id] 2024-12-02T21:31:04,011 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:04,011 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T21:31:04,012 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
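The ZKWatcher/ZKUtil entries above ("Set watcher on znode that does not yet exist, /hbase/master" and the later NodeCreated events) boil down to setting existence watches through the plain ZooKeeper client API. A minimal sketch of that pattern, not HBase's own ZKWatcher: the connect string (127.0.0.1:58323) and znode path come from this log, while the session timeout and the CountDownLatch handshake are assumptions made for the example.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MasterZNodeWatchExample {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
            // ZKWatcher logs every event it receives; here we just print it.
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", state=" + event.getState() + ", path=" + event.getPath());
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58323", 30_000, watcher);
        connected.await();
        // exists() registers a watch even when the znode is not there yet, so a later
        // NodeCreated event for /hbase/master is delivered to the watcher above.
        Stat stat = zk.exists("/hbase/master", true);
        System.out.println("/hbase/master " + (stat == null ? "does not exist yet" : "exists"));
        zk.close();
    }
}
```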
2024-12-02T21:31:04,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:04,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:04,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:31:04,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:31:04,027 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:31:04,028 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T21:31:04,028 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:31:04,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:31:04,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:31:04,035 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store 2024-12-02T21:31:04,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:31:04,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:31:04,041 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:04,041 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:31:04,041 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:04,041 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:04,041 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:31:04,041 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:04,041 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
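The 'master:store' descriptor printed above is internal to the master, but the per-family attributes it lists (BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => 8192, VERSIONS) are the same knobs exposed by the public descriptor builders. A minimal sketch of building a descriptor with those attributes; the table name "demo:store_like" is hypothetical, the attribute values mirror the logged ones.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreLikeDescriptorExample {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store_like"))
            // 'info'-style family: 3 versions, in-memory, ROWCOL bloom filter,
            // ROW_INDEX_V1 block encoding, 8 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc'-style family: single version, ROW bloom filter, 64 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build())
            .build();
    }
}
```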
2024-12-02T21:31:04,042 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733175064041Disabling compacts and flushes for region at 1733175064041Disabling writes for close at 1733175064041Writing region close event to WAL at 1733175064041Closed at 1733175064041 2024-12-02T21:31:04,042 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/.initializing 2024-12-02T21:31:04,042 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/WALs/87c3fdb6c570,37427,1733175063707 2024-12-02T21:31:04,044 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C37427%2C1733175063707, suffix=, logDir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/WALs/87c3fdb6c570,37427,1733175063707, archiveDir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/oldWALs, maxLogs=10 2024-12-02T21:31:04,045 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C37427%2C1733175063707.1733175064045 2024-12-02T21:31:04,049 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/WALs/87c3fdb6c570,37427,1733175063707/87c3fdb6c570%2C37427%2C1733175063707.1733175064045 2024-12-02T21:31:04,052 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37697:37697),(127.0.0.1/127.0.0.1:45971:45971)] 2024-12-02T21:31:04,053 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:31:04,053 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:04,053 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,053 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,054 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:31:04,055 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:04,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:31:04,056 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,057 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:31:04,057 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,057 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:31:04,058 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:31:04,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:31:04,059 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:31:04,060 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,060 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,060 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,061 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,061 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,062 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:31:04,063 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:04,064 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:31:04,065 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732269, jitterRate=-0.06887194514274597}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:31:04,065 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733175064053Initializing all the Stores at 1733175064054 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175064054Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175064054Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175064054Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175064054Cleaning up temporary data from old regions at 1733175064061 (+7 ms)Region opened successfully at 1733175064065 (+4 ms) 2024-12-02T21:31:04,065 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:31:04,068 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4845ee5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:31:04,069 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T21:31:04,069 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:31:04,069 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:31:04,069 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:31:04,070 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T21:31:04,070 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T21:31:04,070 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:31:04,072 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T21:31:04,072 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:31:04,084 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:31:04,085 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:31:04,086 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:31:04,095 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:31:04,095 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:31:04,096 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:31:04,105 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:31:04,106 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:31:04,116 DEBUG 
[master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:31:04,118 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:31:04,126 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:31:04,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:31:04,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:31:04,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:04,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:04,138 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87c3fdb6c570,37427,1733175063707, sessionid=0x10197f4b0350000, setting cluster-up flag (Was=false) 2024-12-02T21:31:04,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:04,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:04,190 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:31:04,193 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,37427,1733175063707 2024-12-02T21:31:04,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:04,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:04,248 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:31:04,250 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,37427,1733175063707 2024-12-02T21:31:04,252 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T21:31:04,255 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T21:31:04,256 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T21:31:04,256 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T21:31:04,256 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87c3fdb6c570,37427,1733175063707 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:31:04,259 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:31:04,259 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:31:04,259 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:31:04,259 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:31:04,259 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87c3fdb6c570:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:31:04,259 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,259 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:31:04,259 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, 
maxPoolSize=1 2024-12-02T21:31:04,261 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733175094260 2024-12-02T21:31:04,261 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:31:04,261 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:31:04,261 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:31:04,261 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:31:04,261 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:31:04,261 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:31:04,262 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,262 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:31:04,262 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:31:04,262 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:31:04,262 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:31:04,262 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:31:04,263 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:31:04,263 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:31:04,263 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,264 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:31:04,264 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733175064263,5,FailOnTimeoutGroup] 2024-12-02T21:31:04,265 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733175064264,5,FailOnTimeoutGroup] 2024-12-02T21:31:04,265 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,265 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:31:04,265 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,265 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
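The znodes the master probes above (/hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge) back the cluster-wide switches that the client Admin API can flip. A minimal client-side sketch of toggling them, assuming a connection to this test cluster's ZooKeeper ensemble (127.0.0.1:58323 from the log); the concrete values passed are only illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterSwitchExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 58323);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            boolean prevBalancer = admin.balancerSwitch(true, false);  // enable the balancer
            boolean prevNormalizer = admin.normalizerSwitch(true);     // enable the normalizer
            admin.splitSwitch(true, false);                            // allow region splits
            admin.mergeSwitch(true, false);                            // allow region merges
            System.out.println("balancer was " + prevBalancer
                + ", normalizer was " + prevNormalizer);
        }
    }
}
```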
2024-12-02T21:31:04,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:31:04,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:31:04,271 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T21:31:04,271 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a 2024-12-02T21:31:04,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:31:04,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:31:04,278 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:04,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:31:04,280 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:31:04,280 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,281 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:04,281 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:31:04,282 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:31:04,282 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,283 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:04,283 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:31:04,284 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:31:04,284 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:04,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:31:04,285 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:31:04,285 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:04,286 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:31:04,287 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740 2024-12-02T21:31:04,287 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740 2024-12-02T21:31:04,288 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:31:04,288 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:31:04,288 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
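The CompactionConfiguration lines above echo the standard compaction knobs (minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2, off-peak ratio=5.0), and the FlushLargeStoresPolicy line shows the fallback taken when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset. A minimal sketch of setting those same knobs programmatically; the values mirror what the log prints rather than being recommendations, and the key names other than the one named in the log are assumed to be the standard ones.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionKnobsExample {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
        // Explicit per-family flush lower bound; the log falls back to
        // memstore-flush-size / number-of-families (16.0 M for hbase:meta) because
        // this key is unset in the test.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16 * 1024 * 1024L);
        return conf;
    }
}
```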
2024-12-02T21:31:04,289 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:31:04,291 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:31:04,292 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878821, jitterRate=0.11747986078262329}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:31:04,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733175064278Initializing all the Stores at 1733175064279 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175064279Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175064279Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175064279Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175064279Cleaning up temporary data from old regions at 1733175064288 (+9 ms)Region opened successfully at 1733175064292 (+4 ms) 2024-12-02T21:31:04,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:31:04,292 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:31:04,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:31:04,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:31:04,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:31:04,293 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:31:04,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733175064292Disabling compacts and flushes for region at 1733175064292Disabling writes for close at 1733175064292Writing region close 
event to WAL at 1733175064293 (+1 ms)Closed at 1733175064293 2024-12-02T21:31:04,294 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:31:04,294 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T21:31:04,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:31:04,295 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:31:04,296 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:31:04,348 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(746): ClusterId : eca96e6e-06ac-4a8e-85de-374de2cc4065 2024-12-02T21:31:04,349 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:31:04,361 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:31:04,361 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:31:04,377 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:31:04,377 DEBUG [RS:0;87c3fdb6c570:34593 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15d00b57, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:31:04,394 DEBUG [RS:0;87c3fdb6c570:34593 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;87c3fdb6c570:34593 2024-12-02T21:31:04,394 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T21:31:04,394 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T21:31:04,394 DEBUG [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T21:31:04,395 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(2659): reportForDuty to master=87c3fdb6c570,37427,1733175063707 with port=34593, startcode=1733175063928 2024-12-02T21:31:04,395 DEBUG [RS:0;87c3fdb6c570:34593 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:31:04,397 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37475, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:31:04,397 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37427 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:04,397 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37427 {}] master.ServerManager(517): Registering regionserver=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:04,399 DEBUG [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a 2024-12-02T21:31:04,399 DEBUG [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45713 2024-12-02T21:31:04,399 DEBUG [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T21:31:04,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:31:04,411 DEBUG [RS:0;87c3fdb6c570:34593 {}] zookeeper.ZKUtil(111): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:04,411 WARN [RS:0;87c3fdb6c570:34593 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T21:31:04,411 INFO [RS:0;87c3fdb6c570:34593 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:31:04,411 DEBUG [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/WALs/87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:04,411 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87c3fdb6c570,34593,1733175063928] 2024-12-02T21:31:04,415 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:31:04,417 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:31:04,417 INFO [RS:0;87c3fdb6c570:34593 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:31:04,417 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
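After the reportForDuty / "Registering regionserver" exchange above, the region server appears in the master's live-server list. A minimal client-side sketch of reading that list through ClusterMetrics; the connection settings are assumed to point at this test cluster's ZooKeeper (127.0.0.1:58323 from the log).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class LiveServersExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 58323);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Each key is a ServerName such as 87c3fdb6c570,34593,1733175063928
            // (host, RPC port, start code), matching the names in this log.
            for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
                System.out.println("live region server: " + sn);
            }
        }
    }
}
```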
2024-12-02T21:31:04,417 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T21:31:04,418 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T21:31:04,418 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:31:04,419 DEBUG [RS:0;87c3fdb6c570:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:31:04,420 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-02T21:31:04,421 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,421 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,421 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,421 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,421 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,34593,1733175063928-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:31:04,435 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:31:04,435 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,34593,1733175063928-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,435 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,435 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.Replication(171): 87c3fdb6c570,34593,1733175063928 started 2024-12-02T21:31:04,446 WARN [87c3fdb6c570:37427 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T21:31:04,447 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
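(Aside.) The entries above register a series of fixed-period chores on the region server (CompactionChecker every 1000 ms, MemstoreFlusherChore every 1000 ms, ExecutorStatusChore every 60 s, BrokenStoreFileCleaner every 6 h, MobFileCleanerChore daily, and so on). A minimal sketch of that "run every N milliseconds" pattern using only the JDK scheduler is shown below; it is purely illustrative and does not use HBase's own ScheduledChore/ChoreService classes.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative only: mimics the periodic behaviour of a chore such as
// CompactionChecker (period=1000 ms in the log) with plain JDK scheduling.
public class ChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        long periodMillis = 1000L; // CompactionChecker runs every 1000 ms in this log
        pool.scheduleAtFixedRate(
            () -> System.out.println("chore tick: check stores for compaction work"),
            periodMillis, periodMillis, TimeUnit.MILLISECONDS);
    }
}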
2024-12-02T21:31:04,447 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(1482): Serving as 87c3fdb6c570,34593,1733175063928, RpcServer on 87c3fdb6c570/172.17.0.3:34593, sessionid=0x10197f4b0350001 2024-12-02T21:31:04,447 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:31:04,447 DEBUG [RS:0;87c3fdb6c570:34593 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:04,447 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,34593,1733175063928' 2024-12-02T21:31:04,447 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:31:04,448 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:31:04,448 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:31:04,448 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:31:04,448 DEBUG [RS:0;87c3fdb6c570:34593 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:04,448 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,34593,1733175063928' 2024-12-02T21:31:04,448 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:31:04,448 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:31:04,449 DEBUG [RS:0;87c3fdb6c570:34593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:31:04,449 INFO [RS:0;87c3fdb6c570:34593 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:31:04,449 INFO [RS:0;87c3fdb6c570:34593 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-02T21:31:04,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-02T21:31:04,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-02T21:31:04,553 INFO [RS:0;87c3fdb6c570:34593 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C34593%2C1733175063928, suffix=, logDir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/WALs/87c3fdb6c570,34593,1733175063928, archiveDir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/oldWALs, maxLogs=32 2024-12-02T21:31:04,554 INFO [RS:0;87c3fdb6c570:34593 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C34593%2C1733175063928.1733175064554 2024-12-02T21:31:04,565 INFO [RS:0;87c3fdb6c570:34593 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/WALs/87c3fdb6c570,34593,1733175063928/87c3fdb6c570%2C34593%2C1733175063928.1733175064554 2024-12-02T21:31:04,566 DEBUG [RS:0;87c3fdb6c570:34593 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37697:37697),(127.0.0.1/127.0.0.1:45971:45971)] 2024-12-02T21:31:04,696 DEBUG [87c3fdb6c570:37427 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:31:04,697 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:04,700 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,34593,1733175063928, state=OPENING 2024-12-02T21:31:04,711 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:31:04,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:04,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:04,722 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:31:04,723 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:31:04,723 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:31:04,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,34593,1733175063928}]
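(Aside.) The WAL configuration entry above reports blocksize=256 MB, rollsize=128 MB and maxLogs=32, i.e. the roll size is the block size scaled by a 0.5 multiplier. A small sketch of how such values could be expressed on a Hadoop Configuration is given below; the property names hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs are the usual WAL-rolling keys, but treat their use here as an assumption about this test's setup, not a transcript of it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: derive a WAL roll size the same way the log line reports it
// (rollsize = blocksize * multiplier). Property names are assumptions.
public class WalRollConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // 256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 32);

        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 0L);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        long rollSize = (long) (blockSize * multiplier); // 128 MB, matching the logged rollsize
        System.out.println("rollsize=" + rollSize + " bytes");
    }
}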
2024-12-02T21:31:04,879 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:31:04,883 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52325, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:31:04,888 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T21:31:04,888 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:31:04,890 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C34593%2C1733175063928.meta, suffix=.meta, logDir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/WALs/87c3fdb6c570,34593,1733175063928, archiveDir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/oldWALs, maxLogs=32 2024-12-02T21:31:04,891 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C34593%2C1733175063928.meta.1733175064890.meta 2024-12-02T21:31:04,897 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/WALs/87c3fdb6c570,34593,1733175063928/87c3fdb6c570%2C34593%2C1733175063928.meta.1733175064890.meta 2024-12-02T21:31:04,898 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45971:45971),(127.0.0.1/127.0.0.1:37697:37697)] 2024-12-02T21:31:04,899 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:31:04,899 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:31:04,900 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:31:04,900 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
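(Aside.) The entries above show hbase:meta (region 1588230740) being opened on 87c3fdb6c570,34593,1733175063928 after its OPENING state was published to ZooKeeper under /hbase/meta-region-server. A hedged client-side sketch of resolving that location through the standard RegionLocator API follows; the quorum string is copied from this log and would normally be site-specific.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

// Sketch: ask the cluster where hbase:meta is currently served, analogous to
// the "fetched meta region location" entries later in this log.
public class MetaLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1:58323"); // value from this log; normally site-specific
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            HRegionLocation loc = locator.getRegionLocation(new byte[0]);
            System.out.println("hbase:meta is on " + loc.getServerName());
        }
    }
}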
2024-12-02T21:31:04,900 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:31:04,900 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:04,900 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T21:31:04,900 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T21:31:04,902 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:31:04,903 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:31:04,903 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:04,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:31:04,905 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:31:04,905 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:04,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:31:04,907 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:31:04,907 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:04,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:31:04,909 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:31:04,909 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:04,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-02T21:31:04,910 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:31:04,911 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740 2024-12-02T21:31:04,912 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740 2024-12-02T21:31:04,913 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:31:04,913 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:31:04,914 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:31:04,915 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:31:04,916 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=819907, jitterRate=0.04256628453731537}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:31:04,916 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T21:31:04,917 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733175064900Writing region info on filesystem at 1733175064900Initializing all the Stores at 1733175064901 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175064902 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175064902Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175064902Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175064902Cleaning up temporary data from old regions at 1733175064913 (+11 ms)Running coprocessor post-open hooks at 1733175064916 (+3 ms)Region opened successfully at 1733175064917 (+1 ms) 2024-12-02T21:31:04,918 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733175064878 2024-12-02T21:31:04,920 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:31:04,920 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T21:31:04,921 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:04,922 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,34593,1733175063928, state=OPEN 2024-12-02T21:31:04,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:31:04,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:31:04,957 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:04,957 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:31:04,957 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:31:04,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:31:04,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,34593,1733175063928 in 234 msec 2024-12-02T21:31:04,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:31:04,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 667 msec 2024-12-02T21:31:04,966 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:31:04,966 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T21:31:04,967 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:31:04,968 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,34593,1733175063928, seqNum=-1] 2024-12-02T21:31:04,968 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:31:04,969 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44775, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:31:04,976 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 721 msec 2024-12-02T21:31:04,976 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733175064976, completionTime=-1 2024-12-02T21:31:04,976 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:31:04,976 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T21:31:04,977 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T21:31:04,977 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733175124977 2024-12-02T21:31:04,977 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733175184977 2024-12-02T21:31:04,977 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-02T21:31:04,978 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,37427,1733175063707-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,978 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,37427,1733175063707-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,978 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,37427,1733175063707-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,978 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87c3fdb6c570:37427, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:31:04,978 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,978 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:04,979 DEBUG [master/87c3fdb6c570:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T21:31:04,981 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.991sec 2024-12-02T21:31:04,981 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:31:04,981 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:31:04,982 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:31:04,982 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T21:31:04,982 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:31:04,982 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,37427,1733175063707-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:31:04,982 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,37427,1733175063707-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:31:04,984 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:31:04,984 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:31:04,984 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,37427,1733175063707-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
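(Aside.) InitMetaProcedure above creates the 'default' and 'hbase' namespaces before the master declares initialization complete. A short, hedged sketch of verifying them from a client follows; connection details are assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: list the namespaces the master created during InitMetaProcedure.
public class NamespaceListSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                System.out.println("namespace: " + ns.getName()); // expect "default" and "hbase"
            }
        }
    }
}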
2024-12-02T21:31:05,048 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@324924bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:31:05,048 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87c3fdb6c570,37427,-1 for getting cluster id 2024-12-02T21:31:05,048 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T21:31:05,050 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'eca96e6e-06ac-4a8e-85de-374de2cc4065' 2024-12-02T21:31:05,051 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T21:31:05,051 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "eca96e6e-06ac-4a8e-85de-374de2cc4065" 2024-12-02T21:31:05,051 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3450a6df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:31:05,051 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87c3fdb6c570,37427,-1] 2024-12-02T21:31:05,052 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T21:31:05,052 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:05,053 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45368, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T21:31:05,054 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ce088a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:31:05,055 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:31:05,056 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,34593,1733175063928, seqNum=-1] 2024-12-02T21:31:05,057 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:31:05,058 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57576, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:31:05,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87c3fdb6c570,37427,1733175063707 2024-12-02T21:31:05,061 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:05,064 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T21:31:05,064 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T21:31:05,065 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 87c3fdb6c570,37427,1733175063707 2024-12-02T21:31:05,065 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@17c909cf 2024-12-02T21:31:05,065 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T21:31:05,066 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45370, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T21:31:05,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37427 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T21:31:05,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37427 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-02T21:31:05,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37427 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:31:05,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37427 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-02T21:31:05,069 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:31:05,069 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:05,069 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37427 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-02T21:31:05,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37427 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T21:31:05,070 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:31:05,077 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741835_1011 (size=381) 2024-12-02T21:31:05,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741835_1011 (size=381) 2024-12-02T21:31:05,079 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4969b595c3fd7103c37e1a4903c80ee2, NAME => 'TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a 2024-12-02T21:31:05,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741836_1012 (size=64) 2024-12-02T21:31:05,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741836_1012 (size=64) 2024-12-02T21:31:05,086 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:05,086 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 4969b595c3fd7103c37e1a4903c80ee2, disabling compactions & flushes 2024-12-02T21:31:05,086 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:05,086 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:05,086 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. after waiting 0 ms 2024-12-02T21:31:05,086 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:05,086 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 
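(Aside.) The create-table request above carries a deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192), which is what triggers the two TableDescriptorChecker warnings; values this small force frequent flushes and splits, which is what a log-rolling test wants. A hedged client-side sketch that would produce an equivalent descriptor is shown below; it is not the test's actual code (the test drives this through its mini-cluster utility), and connection setup is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: build a descriptor equivalent to the one logged for
// TestLogRolling-testLogRolling (single 'info' family, 1 version, tiny
// max file size and memstore flush size).
public class CreateTestTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.createTable(
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                    .setMaxFileSize(786432L)        // matches the MAX_FILESIZE warning above
                    .setMemStoreFlushSize(8192L)    // matches the MEMSTORE_FLUSHSIZE warning above
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("info"))
                        .setMaxVersions(1)
                        .build())
                    .build());
        }
    }
}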
2024-12-02T21:31:05,086 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4969b595c3fd7103c37e1a4903c80ee2: Waiting for close lock at 1733175065086Disabling compacts and flushes for region at 1733175065086Disabling writes for close at 1733175065086Writing region close event to WAL at 1733175065086Closed at 1733175065086 2024-12-02T21:31:05,087 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:31:05,088 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733175065088"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733175065088"}]},"ts":"1733175065088"} 2024-12-02T21:31:05,090 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-02T21:31:05,091 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:31:05,092 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733175065091"}]},"ts":"1733175065091"} 2024-12-02T21:31:05,094 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-02T21:31:05,094 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4969b595c3fd7103c37e1a4903c80ee2, ASSIGN}] 2024-12-02T21:31:05,095 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4969b595c3fd7103c37e1a4903c80ee2, ASSIGN 2024-12-02T21:31:05,096 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4969b595c3fd7103c37e1a4903c80ee2, ASSIGN; state=OFFLINE, location=87c3fdb6c570,34593,1733175063928; forceNewPlan=false, retain=false 2024-12-02T21:31:05,247 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4969b595c3fd7103c37e1a4903c80ee2, regionState=OPENING, regionLocation=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:05,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4969b595c3fd7103c37e1a4903c80ee2, ASSIGN because future has completed 2024-12-02T21:31:05,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4969b595c3fd7103c37e1a4903c80ee2, 
server=87c3fdb6c570,34593,1733175063928}] 2024-12-02T21:31:05,420 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:05,421 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4969b595c3fd7103c37e1a4903c80ee2, NAME => 'TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:31:05,422 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:05,422 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:05,422 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:05,422 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:05,425 INFO [StoreOpener-4969b595c3fd7103c37e1a4903c80ee2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:05,427 INFO [StoreOpener-4969b595c3fd7103c37e1a4903c80ee2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4969b595c3fd7103c37e1a4903c80ee2 columnFamilyName info 2024-12-02T21:31:05,427 DEBUG [StoreOpener-4969b595c3fd7103c37e1a4903c80ee2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:05,427 INFO [StoreOpener-4969b595c3fd7103c37e1a4903c80ee2-1 {}] regionserver.HStore(327): Store=4969b595c3fd7103c37e1a4903c80ee2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:31:05,428 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:05,429 DEBUG 
[RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:05,429 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:05,430 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:05,430 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:05,432 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:05,436 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:31:05,436 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4969b595c3fd7103c37e1a4903c80ee2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821861, jitterRate=0.04505133628845215}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:31:05,436 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:05,437 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4969b595c3fd7103c37e1a4903c80ee2: Running coprocessor pre-open hook at 1733175065422Writing region info on filesystem at 1733175065422Initializing all the Stores at 1733175065424 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175065424Cleaning up temporary data from old regions at 1733175065430 (+6 ms)Running coprocessor post-open hooks at 1733175065436 (+6 ms)Region opened successfully at 1733175065437 (+1 ms) 2024-12-02T21:31:05,438 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2., pid=6, masterSystemTime=1733175065411 2024-12-02T21:31:05,440 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:05,440 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:05,441 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4969b595c3fd7103c37e1a4903c80ee2, regionState=OPEN, openSeqNum=2, regionLocation=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:05,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4969b595c3fd7103c37e1a4903c80ee2, server=87c3fdb6c570,34593,1733175063928 because future has completed 2024-12-02T21:31:05,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T21:31:05,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4969b595c3fd7103c37e1a4903c80ee2, server=87c3fdb6c570,34593,1733175063928 in 189 msec 2024-12-02T21:31:05,449 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T21:31:05,450 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4969b595c3fd7103c37e1a4903c80ee2, ASSIGN in 353 msec 2024-12-02T21:31:05,450 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:31:05,451 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733175065451"}]},"ts":"1733175065451"} 2024-12-02T21:31:05,453 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-02T21:31:05,454 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:31:05,456 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 387 msec 2024-12-02T21:31:05,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:05,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:05,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:05,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:05,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:05,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:05,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:05,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:05,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:05,989 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,002 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,002 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,002 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,003 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,003 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,003 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,511 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:31:06,512 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,513 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,513 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,513 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,513 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:06,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:06,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,535 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,535 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:06,540 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-02T21:31:06,540 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-02T21:31:06,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:31:07,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:07,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:08,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:08,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:09,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:09,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:10,415 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T21:31:10,417 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-02T21:31:10,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:10,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:11,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:11,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:12,047 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:31:12,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,070 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,075 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:12,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:12,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:13,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:31:13,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:14,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:14,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:31:15,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37427 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T21:31:15,093 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-02T21:31:15,093 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-02T21:31:15,099 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-02T21:31:15,099 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:15,102 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2., hostname=87c3fdb6c570,34593,1733175063928, seqNum=2] 2024-12-02T21:31:15,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:15,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4969b595c3fd7103c37e1a4903c80ee2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:31:15,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/661d30203135418387074861fa1cea2e is 1080, key is row0001/info:/1733175075104/Put/seqid=0 2024-12-02T21:31:15,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741837_1013 (size=12509) 2024-12-02T21:31:15,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741837_1013 (size=12509) 2024-12-02T21:31:15,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/661d30203135418387074861fa1cea2e 2024-12-02T21:31:15,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/661d30203135418387074861fa1cea2e as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/661d30203135418387074861fa1cea2e 2024-12-02T21:31:15,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/661d30203135418387074861fa1cea2e, entries=7, sequenceid=11, filesize=12.2 K 
2024-12-02T21:31:15,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 4969b595c3fd7103c37e1a4903c80ee2 in 39ms, sequenceid=11, compaction requested=false 2024-12-02T21:31:15,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4969b595c3fd7103c37e1a4903c80ee2: 2024-12-02T21:31:15,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:15,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4969b595c3fd7103c37e1a4903c80ee2 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-12-02T21:31:15,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/2625287828414270bea404d582b626c8 is 1080, key is row0008/info:/1733175075124/Put/seqid=0 2024-12-02T21:31:15,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741838_1014 (size=26530) 2024-12-02T21:31:15,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741838_1014 (size=26530) 2024-12-02T21:31:15,173 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/2625287828414270bea404d582b626c8 2024-12-02T21:31:15,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/2625287828414270bea404d582b626c8 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2625287828414270bea404d582b626c8 2024-12-02T21:31:15,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2625287828414270bea404d582b626c8, entries=20, sequenceid=34, filesize=25.9 K 2024-12-02T21:31:15,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=5.25 KB/5380 for 4969b595c3fd7103c37e1a4903c80ee2 in 20ms, sequenceid=34, compaction requested=false 2024-12-02T21:31:15,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4969b595c3fd7103c37e1a4903c80ee2: 2024-12-02T21:31:15,184 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=38.1 K, sizeToCheck=16.0 K 2024-12-02T21:31:15,184 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:15,184 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2625287828414270bea404d582b626c8 because midkey is the same as first or last row 2024-12-02T21:31:15,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:15,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:16,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:16,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:31:17,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:17,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4969b595c3fd7103c37e1a4903c80ee2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:31:17,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/dbf73743a37946a4bfe6009dfcc5bb4a is 1080, key is row0028/info:/1733175075164/Put/seqid=0 2024-12-02T21:31:17,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741839_1015 (size=12509) 2024-12-02T21:31:17,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741839_1015 (size=12509) 2024-12-02T21:31:17,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/dbf73743a37946a4bfe6009dfcc5bb4a 2024-12-02T21:31:17,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/dbf73743a37946a4bfe6009dfcc5bb4a as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/dbf73743a37946a4bfe6009dfcc5bb4a 2024-12-02T21:31:17,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/dbf73743a37946a4bfe6009dfcc5bb4a, entries=7, sequenceid=44, filesize=12.2 K 2024-12-02T21:31:17,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 4969b595c3fd7103c37e1a4903c80ee2 in 27ms, sequenceid=44, compaction requested=true 2024-12-02T21:31:17,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4969b595c3fd7103c37e1a4903c80ee2: 2024-12-02T21:31:17,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:17,210 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=50.3 K, sizeToCheck=16.0 K 2024-12-02T21:31:17,210 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:17,210 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2625287828414270bea404d582b626c8 because midkey is the same as first or last row 2024-12-02T21:31:17,211 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4969b595c3fd7103c37e1a4903c80ee2:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:31:17,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:17,211 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:31:17,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4969b595c3fd7103c37e1a4903c80ee2 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-02T21:31:17,212 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 51548 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:31:17,212 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1541): 4969b595c3fd7103c37e1a4903c80ee2/info is initiating minor compaction (all files) 2024-12-02T21:31:17,212 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4969b595c3fd7103c37e1a4903c80ee2/info in TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:17,212 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/661d30203135418387074861fa1cea2e, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2625287828414270bea404d582b626c8, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/dbf73743a37946a4bfe6009dfcc5bb4a] into tmpdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp, totalSize=50.3 K 2024-12-02T21:31:17,213 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 661d30203135418387074861fa1cea2e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733175075104 2024-12-02T21:31:17,213 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2625287828414270bea404d582b626c8, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733175075124 2024-12-02T21:31:17,214 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting dbf73743a37946a4bfe6009dfcc5bb4a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1733175075164 2024-12-02T21:31:17,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/0d1d729b1c8e45ddaf7086654ab17e8b is 1080, key is row0035/info:/1733175077185/Put/seqid=0 
2024-12-02T21:31:17,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741840_1016 (size=17894) 2024-12-02T21:31:17,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741840_1016 (size=17894) 2024-12-02T21:31:17,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/0d1d729b1c8e45ddaf7086654ab17e8b 2024-12-02T21:31:17,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/0d1d729b1c8e45ddaf7086654ab17e8b as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/0d1d729b1c8e45ddaf7086654ab17e8b 2024-12-02T21:31:17,227 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4969b595c3fd7103c37e1a4903c80ee2#info#compaction#59 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:31:17,227 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/50dc1ff0e8514b308e7718b35db1e668 is 1080, key is row0001/info:/1733175075104/Put/seqid=0 2024-12-02T21:31:17,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/0d1d729b1c8e45ddaf7086654ab17e8b, entries=12, sequenceid=59, filesize=17.5 K 2024-12-02T21:31:17,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 4969b595c3fd7103c37e1a4903c80ee2 in 22ms, sequenceid=59, compaction requested=false 2024-12-02T21:31:17,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741841_1017 (size=41747) 2024-12-02T21:31:17,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4969b595c3fd7103c37e1a4903c80ee2: 2024-12-02T21:31:17,234 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.8 K, sizeToCheck=16.0 K 2024-12-02T21:31:17,234 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:17,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741841_1017 (size=41747) 2024-12-02T21:31:17,234 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2625287828414270bea404d582b626c8 because midkey is the same as first or last row 2024-12-02T21:31:17,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:17,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4969b595c3fd7103c37e1a4903c80ee2 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-02T21:31:17,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/931ecc79ca9d4e8ca49ec42d8c0d624a is 1080, key is row0047/info:/1733175077211/Put/seqid=0 2024-12-02T21:31:17,241 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/50dc1ff0e8514b308e7718b35db1e668 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/50dc1ff0e8514b308e7718b35db1e668 2024-12-02T21:31:17,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741842_1018 (size=16817) 2024-12-02T21:31:17,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741842_1018 (size=16817) 2024-12-02T21:31:17,248 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4969b595c3fd7103c37e1a4903c80ee2/info of 4969b595c3fd7103c37e1a4903c80ee2 into 50dc1ff0e8514b308e7718b35db1e668(size=40.8 K), total size for store is 58.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T21:31:17,248 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4969b595c3fd7103c37e1a4903c80ee2: 2024-12-02T21:31:17,248 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2., storeName=4969b595c3fd7103c37e1a4903c80ee2/info, priority=13, startTime=1733175077210; duration=0sec 2024-12-02T21:31:17,249 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=58.2 K, sizeToCheck=16.0 K 2024-12-02T21:31:17,249 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:17,249 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/50dc1ff0e8514b308e7718b35db1e668 because midkey is the same as first or last row 2024-12-02T21:31:17,249 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=58.2 K, sizeToCheck=16.0 K 2024-12-02T21:31:17,249 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:17,249 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/50dc1ff0e8514b308e7718b35db1e668 because midkey is the same as first or last row 2024-12-02T21:31:17,249 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=58.2 K, sizeToCheck=16.0 K 2024-12-02T21:31:17,249 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:17,249 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/50dc1ff0e8514b308e7718b35db1e668 because midkey is the same as first or last row 2024-12-02T21:31:17,249 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:17,249 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4969b595c3fd7103c37e1a4903c80ee2:info 2024-12-02T21:31:17,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:17,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:17,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/931ecc79ca9d4e8ca49ec42d8c0d624a 2024-12-02T21:31:17,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/931ecc79ca9d4e8ca49ec42d8c0d624a as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/931ecc79ca9d4e8ca49ec42d8c0d624a 2024-12-02T21:31:17,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/931ecc79ca9d4e8ca49ec42d8c0d624a, entries=11, sequenceid=73, filesize=16.4 K 2024-12-02T21:31:17,668 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=7.36 KB/7532 for 4969b595c3fd7103c37e1a4903c80ee2 in 434ms, sequenceid=73, compaction requested=true 2024-12-02T21:31:17,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4969b595c3fd7103c37e1a4903c80ee2: 2024-12-02T21:31:17,668 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K 2024-12-02T21:31:17,668 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:17,668 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/50dc1ff0e8514b308e7718b35db1e668 because midkey is the same as first or last row 2024-12-02T21:31:17,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4969b595c3fd7103c37e1a4903c80ee2:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:31:17,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:17,668 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:31:17,670 DEBUG 
[RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76458 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:31:17,670 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1541): 4969b595c3fd7103c37e1a4903c80ee2/info is initiating minor compaction (all files) 2024-12-02T21:31:17,670 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4969b595c3fd7103c37e1a4903c80ee2/info in TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:17,670 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/50dc1ff0e8514b308e7718b35db1e668, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/0d1d729b1c8e45ddaf7086654ab17e8b, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/931ecc79ca9d4e8ca49ec42d8c0d624a] into tmpdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp, totalSize=74.7 K 2024-12-02T21:31:17,670 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 50dc1ff0e8514b308e7718b35db1e668, keycount=34, bloomtype=ROW, size=40.8 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1733175075104 2024-12-02T21:31:17,671 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0d1d729b1c8e45ddaf7086654ab17e8b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=59, earliestPutTs=1733175077185 2024-12-02T21:31:17,671 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 931ecc79ca9d4e8ca49ec42d8c0d624a, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733175077211 2024-12-02T21:31:17,685 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4969b595c3fd7103c37e1a4903c80ee2#info#compaction#61 average throughput is 19.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:31:17,685 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/2f802570f762447ba9dcce6bbfd314f3 is 1080, key is row0001/info:/1733175075104/Put/seqid=0 2024-12-02T21:31:17,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741843_1019 (size=66689) 2024-12-02T21:31:17,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741843_1019 (size=66689) 2024-12-02T21:31:17,694 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/2f802570f762447ba9dcce6bbfd314f3 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2f802570f762447ba9dcce6bbfd314f3 2024-12-02T21:31:17,699 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4969b595c3fd7103c37e1a4903c80ee2/info of 4969b595c3fd7103c37e1a4903c80ee2 into 2f802570f762447ba9dcce6bbfd314f3(size=65.1 K), total size for store is 65.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:31:17,699 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4969b595c3fd7103c37e1a4903c80ee2: 2024-12-02T21:31:17,699 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2., storeName=4969b595c3fd7103c37e1a4903c80ee2/info, priority=13, startTime=1733175077668; duration=0sec 2024-12-02T21:31:17,699 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-12-02T21:31:17,699 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:17,700 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2f802570f762447ba9dcce6bbfd314f3 because midkey is the same as first or last row 2024-12-02T21:31:17,700 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-12-02T21:31:17,700 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:17,700 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2f802570f762447ba9dcce6bbfd314f3 because midkey is the same as first or last row 2024-12-02T21:31:17,700 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-12-02T21:31:17,700 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:17,700 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2f802570f762447ba9dcce6bbfd314f3 because midkey is the same as first or last row 2024-12-02T21:31:17,700 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:17,700 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4969b595c3fd7103c37e1a4903c80ee2:info 2024-12-02T21:31:18,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:18,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:31:19,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4969b595c3fd7103c37e1a4903c80ee2 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-02T21:31:19,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/78e9e36d2c3b4e69bc217307b558a6dd is 1080, key is row0058/info:/1733175077235/Put/seqid=0 2024-12-02T21:31:19,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741844_1020 (size=13586) 2024-12-02T21:31:19,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741844_1020 (size=13586) 2024-12-02T21:31:19,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/78e9e36d2c3b4e69bc217307b558a6dd 2024-12-02T21:31:19,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/78e9e36d2c3b4e69bc217307b558a6dd as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/78e9e36d2c3b4e69bc217307b558a6dd 2024-12-02T21:31:19,282 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/78e9e36d2c3b4e69bc217307b558a6dd, entries=8, sequenceid=86, filesize=13.3 K 2024-12-02T21:31:19,283 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=11.56 KB/11836 for 4969b595c3fd7103c37e1a4903c80ee2 in 28ms, sequenceid=86, compaction requested=false 2024-12-02T21:31:19,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4969b595c3fd7103c37e1a4903c80ee2: 2024-12-02T21:31:19,283 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-12-02T21:31:19,283 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:19,283 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2f802570f762447ba9dcce6bbfd314f3 because midkey is the same as first or last row 2024-12-02T21:31:19,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,284 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4969b595c3fd7103c37e1a4903c80ee2 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-02T21:31:19,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/a6e8c22c63894acebfad08a9920539d2 is 1080, key is row0066/info:/1733175079257/Put/seqid=0 2024-12-02T21:31:19,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741845_1021 (size=17894) 2024-12-02T21:31:19,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741845_1021 (size=17894) 2024-12-02T21:31:19,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/a6e8c22c63894acebfad08a9920539d2 2024-12-02T21:31:19,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/a6e8c22c63894acebfad08a9920539d2 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6e8c22c63894acebfad08a9920539d2 2024-12-02T21:31:19,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6e8c22c63894acebfad08a9920539d2, entries=12, sequenceid=101, filesize=17.5 K 2024-12-02T21:31:19,309 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 4969b595c3fd7103c37e1a4903c80ee2 in 24ms, sequenceid=101, compaction requested=true 2024-12-02T21:31:19,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4969b595c3fd7103c37e1a4903c80ee2: 2024-12-02T21:31:19,309 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.9 K, sizeToCheck=16.0 K 2024-12-02T21:31:19,309 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:19,309 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2f802570f762447ba9dcce6bbfd314f3 because midkey is the same as first or last row 2024-12-02T21:31:19,309 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:31:19,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4969b595c3fd7103c37e1a4903c80ee2:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-02T21:31:19,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:19,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,310 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4969b595c3fd7103c37e1a4903c80ee2 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-02T21:31:19,311 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 98169 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:31:19,311 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1541): 4969b595c3fd7103c37e1a4903c80ee2/info is initiating minor compaction (all files) 2024-12-02T21:31:19,311 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4969b595c3fd7103c37e1a4903c80ee2/info in TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:19,311 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2f802570f762447ba9dcce6bbfd314f3, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/78e9e36d2c3b4e69bc217307b558a6dd, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6e8c22c63894acebfad08a9920539d2] into tmpdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp, totalSize=95.9 K 2024-12-02T21:31:19,311 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2f802570f762447ba9dcce6bbfd314f3, keycount=57, bloomtype=ROW, size=65.1 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733175075104 2024-12-02T21:31:19,312 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 78e9e36d2c3b4e69bc217307b558a6dd, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1733175077235 2024-12-02T21:31:19,312 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting a6e8c22c63894acebfad08a9920539d2, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733175079257 2024-12-02T21:31:19,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/3d976e91367d4e62bfeae47b85ccddf0 is 1080, key is row0078/info:/1733175079285/Put/seqid=0 2024-12-02T21:31:19,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741846_1022 (size=17894) 2024-12-02T21:31:19,328 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741846_1022 (size=17894) 2024-12-02T21:31:19,331 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4969b595c3fd7103c37e1a4903c80ee2#info#compaction#65 average throughput is 19.75 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:31:19,332 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/a6971c7b12714adabba25079dbd8fdf8 is 1080, key is row0001/info:/1733175075104/Put/seqid=0 2024-12-02T21:31:19,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741847_1023 (size=88408) 2024-12-02T21:31:19,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741847_1023 (size=88408) 2024-12-02T21:31:19,343 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/a6971c7b12714adabba25079dbd8fdf8 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6971c7b12714adabba25079dbd8fdf8 2024-12-02T21:31:19,349 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4969b595c3fd7103c37e1a4903c80ee2/info of 4969b595c3fd7103c37e1a4903c80ee2 into a6971c7b12714adabba25079dbd8fdf8(size=86.3 K), total size for store is 86.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T21:31:19,349 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4969b595c3fd7103c37e1a4903c80ee2: 2024-12-02T21:31:19,349 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2., storeName=4969b595c3fd7103c37e1a4903c80ee2/info, priority=13, startTime=1733175079309; duration=0sec 2024-12-02T21:31:19,349 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=86.3 K, sizeToCheck=16.0 K 2024-12-02T21:31:19,349 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:19,349 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=86.3 K, sizeToCheck=16.0 K 2024-12-02T21:31:19,349 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:19,349 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=86.3 K, sizeToCheck=16.0 K 2024-12-02T21:31:19,349 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:31:19,350 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:19,350 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:19,350 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4969b595c3fd7103c37e1a4903c80ee2:info 2024-12-02T21:31:19,351 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37427 {}] assignment.AssignmentManager(1363): Split request from 87c3fdb6c570,34593,1733175063928, parent={ENCODED => 4969b595c3fd7103c37e1a4903c80ee2, NAME => 'TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-02T21:31:19,356 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37427 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:19,360 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37427 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4969b595c3fd7103c37e1a4903c80ee2, daughterA=cc8225a2b44932338372e937911a920c, daughterB=d725ced47985accfdccea3024939292e 2024-12-02T21:31:19,361 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4969b595c3fd7103c37e1a4903c80ee2, 
daughterA=cc8225a2b44932338372e937911a920c, daughterB=d725ced47985accfdccea3024939292e 2024-12-02T21:31:19,361 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4969b595c3fd7103c37e1a4903c80ee2, daughterA=cc8225a2b44932338372e937911a920c, daughterB=d725ced47985accfdccea3024939292e 2024-12-02T21:31:19,361 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4969b595c3fd7103c37e1a4903c80ee2, daughterA=cc8225a2b44932338372e937911a920c, daughterB=d725ced47985accfdccea3024939292e 2024-12-02T21:31:19,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4969b595c3fd7103c37e1a4903c80ee2, UNASSIGN}] 2024-12-02T21:31:19,369 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4969b595c3fd7103c37e1a4903c80ee2, UNASSIGN 2024-12-02T21:31:19,371 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=4969b595c3fd7103c37e1a4903c80ee2, regionState=CLOSING, regionLocation=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:19,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4969b595c3fd7103c37e1a4903c80ee2, UNASSIGN because future has completed 2024-12-02T21:31:19,373 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T21:31:19,373 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4969b595c3fd7103c37e1a4903c80ee2, server=87c3fdb6c570,34593,1733175063928}] 2024-12-02T21:31:19,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-02T21:31:19,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:19,534 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,535 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-02T21:31:19,536 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 4969b595c3fd7103c37e1a4903c80ee2, disabling compactions & flushes 2024-12-02T21:31:19,536 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1993): waiting for 0 compactions & cache flush to complete for region TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:19,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/3d976e91367d4e62bfeae47b85ccddf0 2024-12-02T21:31:19,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/3d976e91367d4e62bfeae47b85ccddf0 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/3d976e91367d4e62bfeae47b85ccddf0 2024-12-02T21:31:19,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/3d976e91367d4e62bfeae47b85ccddf0, entries=12, sequenceid=116, filesize=17.5 K 2024-12-02T21:31:19,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for 4969b595c3fd7103c37e1a4903c80ee2 in 439ms, sequenceid=116, compaction requested=false 2024-12-02T21:31:19,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4969b595c3fd7103c37e1a4903c80ee2: 2024-12-02T21:31:19,750 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:19,750 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 
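A note on the repeated "Failed invocation" WARNs above: the stack shows RecoverLeaseFSUtils.isFileClosed going through Method.invoke, i.e. the DistributedFileSystem.isFileClosed probe is made reflectively, so the IOException("Filesystem closed") thrown once the underlying DFSClient has been closed surfaces wrapped in an InvocationTargetException whose own message is null. Below is a minimal, self-contained illustration of that wrapping only; it uses an invented FakeFileSystem class and is not HBase or HDFS source.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class InvocationTargetExceptionDemo {
  // Stand-in for the reflected target; mimics the "Filesystem closed" check failing.
  // Not a real HDFS class.
  public static class FakeFileSystem {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    Method m = FakeFileSystem.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(new FakeFileSystem(), "/some/wal/file");
    } catch (InvocationTargetException e) {
      System.out.println("wrapper: " + e);            // java.lang.reflect.InvocationTargetException (no message)
      System.out.println("cause:   " + e.getCause()); // java.io.IOException: Filesystem closed
    }
  }
}

Because the wrapper carries no message of its own, the log renders the outer exception as "java.lang.reflect.InvocationTargetException: null" and the real reason only appears under "Caused by".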
2024-12-02T21:31:19,750 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. after waiting 0 ms 2024-12-02T21:31:19,750 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:19,750 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 4969b595c3fd7103c37e1a4903c80ee2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:31:19,755 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/4515bdbac98a45f3a533fcbb892d5f3d is 1080, key is row0090/info:/1733175079311/Put/seqid=0 2024-12-02T21:31:19,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741848_1024 (size=12509) 2024-12-02T21:31:19,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741848_1024 (size=12509) 2024-12-02T21:31:19,761 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/4515bdbac98a45f3a533fcbb892d5f3d 2024-12-02T21:31:19,767 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/.tmp/info/4515bdbac98a45f3a533fcbb892d5f3d as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/4515bdbac98a45f3a533fcbb892d5f3d 2024-12-02T21:31:19,775 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/4515bdbac98a45f3a533fcbb892d5f3d, entries=7, sequenceid=127, filesize=12.2 K 2024-12-02T21:31:19,776 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 4969b595c3fd7103c37e1a4903c80ee2 in 26ms, sequenceid=127, compaction requested=true 2024-12-02T21:31:19,778 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/661d30203135418387074861fa1cea2e, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2625287828414270bea404d582b626c8, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/50dc1ff0e8514b308e7718b35db1e668, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/dbf73743a37946a4bfe6009dfcc5bb4a, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/0d1d729b1c8e45ddaf7086654ab17e8b, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2f802570f762447ba9dcce6bbfd314f3, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/931ecc79ca9d4e8ca49ec42d8c0d624a, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/78e9e36d2c3b4e69bc217307b558a6dd, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6e8c22c63894acebfad08a9920539d2] to archive 2024-12-02T21:31:19,779 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
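The "Archived from ... to ..." lines that follow are moves of compacted store files into a parallel archive/ tree under the same test-data root. A tiny sketch of the path rewrite they show (illustrative only; toArchivePath is an invented helper, not the HFileArchiver API):

public class ArchivePathSketch {
  // Hypothetical helper, not an HBase API: mirror a store file path from the live
  // data/ tree into the archive/ tree, as seen in the Archived from ... to ... lines.
  static String toArchivePath(String storeFile) {
    return storeFile.replaceFirst("/data/default/", "/archive/data/default/");
  }

  public static void main(String[] args) {
    String src = "hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a"
        + "/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2"
        + "/info/661d30203135418387074861fa1cea2e";
    System.out.println(toArchivePath(src));
    // table, region, family and file name are unchanged; only the data/ prefix gains an archive/ parent
  }
}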
2024-12-02T21:31:19,782 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/661d30203135418387074861fa1cea2e to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/661d30203135418387074861fa1cea2e 2024-12-02T21:31:19,784 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2625287828414270bea404d582b626c8 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2625287828414270bea404d582b626c8 2024-12-02T21:31:19,785 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/50dc1ff0e8514b308e7718b35db1e668 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/50dc1ff0e8514b308e7718b35db1e668 2024-12-02T21:31:19,787 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/dbf73743a37946a4bfe6009dfcc5bb4a to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/dbf73743a37946a4bfe6009dfcc5bb4a 2024-12-02T21:31:19,789 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/0d1d729b1c8e45ddaf7086654ab17e8b to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/0d1d729b1c8e45ddaf7086654ab17e8b 2024-12-02T21:31:19,790 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2f802570f762447ba9dcce6bbfd314f3 to 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/2f802570f762447ba9dcce6bbfd314f3 2024-12-02T21:31:19,791 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/931ecc79ca9d4e8ca49ec42d8c0d624a to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/931ecc79ca9d4e8ca49ec42d8c0d624a 2024-12-02T21:31:19,793 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/78e9e36d2c3b4e69bc217307b558a6dd to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/78e9e36d2c3b4e69bc217307b558a6dd 2024-12-02T21:31:19,794 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6e8c22c63894acebfad08a9920539d2 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6e8c22c63894acebfad08a9920539d2 2024-12-02T21:31:19,802 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 2024-12-02T21:31:19,803 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 2024-12-02T21:31:19,803 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 4969b595c3fd7103c37e1a4903c80ee2: Waiting for close lock at 1733175079536Running coprocessor pre-close hooks at 1733175079536Disabling compacts and flushes for region at 1733175079536Disabling writes for close at 1733175079750 (+214 ms)Obtaining lock to block concurrent updates at 1733175079750Preparing flush snapshotting stores in 4969b595c3fd7103c37e1a4903c80ee2 at 1733175079750Finished memstore snapshotting TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2., syncing WAL and waiting on mvcc, flushsize=dataSize=7532, getHeapSize=8304, getOffHeapSize=0, getCellsCount=7 at 1733175079750Flushing stores of TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 
at 1733175079751 (+1 ms)Flushing 4969b595c3fd7103c37e1a4903c80ee2/info: creating writer at 1733175079751Flushing 4969b595c3fd7103c37e1a4903c80ee2/info: appending metadata at 1733175079755 (+4 ms)Flushing 4969b595c3fd7103c37e1a4903c80ee2/info: closing flushed file at 1733175079755Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78c4eb3a: reopening flushed file at 1733175079767 (+12 ms)Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 4969b595c3fd7103c37e1a4903c80ee2 in 26ms, sequenceid=127, compaction requested=true at 1733175079776 (+9 ms)Writing region close event to WAL at 1733175079797 (+21 ms)Running coprocessor post-close hooks at 1733175079803 (+6 ms)Closed at 1733175079803 2024-12-02T21:31:19,807 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,808 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=4969b595c3fd7103c37e1a4903c80ee2, regionState=CLOSED 2024-12-02T21:31:19,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4969b595c3fd7103c37e1a4903c80ee2, server=87c3fdb6c570,34593,1733175063928 because future has completed 2024-12-02T21:31:19,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-02T21:31:19,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 4969b595c3fd7103c37e1a4903c80ee2, server=87c3fdb6c570,34593,1733175063928 in 439 msec 2024-12-02T21:31:19,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-02T21:31:19,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4969b595c3fd7103c37e1a4903c80ee2, UNASSIGN in 448 msec 2024-12-02T21:31:19,829 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:19,836 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=4969b595c3fd7103c37e1a4903c80ee2, threads=3 2024-12-02T21:31:19,838 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/4515bdbac98a45f3a533fcbb892d5f3d for region: 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,838 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/3d976e91367d4e62bfeae47b85ccddf0 for region: 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,838 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6971c7b12714adabba25079dbd8fdf8 for region: 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,848 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/3d976e91367d4e62bfeae47b85ccddf0, top=true 2024-12-02T21:31:19,848 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/4515bdbac98a45f3a533fcbb892d5f3d, top=true 2024-12-02T21:31:19,855 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-4515bdbac98a45f3a533fcbb892d5f3d for child: d725ced47985accfdccea3024939292e, parent: 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,855 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/4515bdbac98a45f3a533fcbb892d5f3d for region: 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741849_1025 (size=27) 2024-12-02T21:31:19,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741849_1025 (size=27) 2024-12-02T21:31:19,861 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-3d976e91367d4e62bfeae47b85ccddf0 for child: d725ced47985accfdccea3024939292e, parent: 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,861 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/3d976e91367d4e62bfeae47b85ccddf0 for region: 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741850_1026 (size=27) 2024-12-02T21:31:19,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741850_1026 (size=27) 2024-12-02T21:31:19,864 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6971c7b12714adabba25079dbd8fdf8 for region: 4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:19,866 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 4969b595c3fd7103c37e1a4903c80ee2 Daughter A: [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2] storefiles, Daughter B: [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-3d976e91367d4e62bfeae47b85ccddf0, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-4515bdbac98a45f3a533fcbb892d5f3d, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2] storefiles. 2024-12-02T21:31:19,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741851_1027 (size=71) 2024-12-02T21:31:19,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741851_1027 (size=71) 2024-12-02T21:31:19,874 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:19,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741852_1028 (size=71) 2024-12-02T21:31:19,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741852_1028 (size=71) 2024-12-02T21:31:19,887 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:19,896 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-12-02T21:31:19,899 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-12-02T21:31:19,901 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733175079901"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733175079901"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733175079901"}]},"ts":"1733175079901"} 2024-12-02T21:31:19,901 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733175079901"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733175079901"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733175079901"}]},"ts":"1733175079901"} 2024-12-02T21:31:19,901 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733175079901"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733175079901"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733175079901"}]},"ts":"1733175079901"} 2024-12-02T21:31:19,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cc8225a2b44932338372e937911a920c, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d725ced47985accfdccea3024939292e, ASSIGN}] 2024-12-02T21:31:19,917 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d725ced47985accfdccea3024939292e, ASSIGN 2024-12-02T21:31:19,917 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cc8225a2b44932338372e937911a920c, ASSIGN 2024-12-02T21:31:19,918 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d725ced47985accfdccea3024939292e, ASSIGN; state=SPLITTING_NEW, location=87c3fdb6c570,34593,1733175063928; forceNewPlan=false, retain=false 2024-12-02T21:31:19,918 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cc8225a2b44932338372e937911a920c, ASSIGN; state=SPLITTING_NEW, location=87c3fdb6c570,34593,1733175063928; forceNewPlan=false, retain=false 2024-12-02T21:31:20,069 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=cc8225a2b44932338372e937911a920c, regionState=OPENING, regionLocation=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:20,069 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta 
row=d725ced47985accfdccea3024939292e, regionState=OPENING, regionLocation=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:20,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cc8225a2b44932338372e937911a920c, ASSIGN because future has completed 2024-12-02T21:31:20,072 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure cc8225a2b44932338372e937911a920c, server=87c3fdb6c570,34593,1733175063928}] 2024-12-02T21:31:20,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d725ced47985accfdccea3024939292e, ASSIGN because future has completed 2024-12-02T21:31:20,074 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure d725ced47985accfdccea3024939292e, server=87c3fdb6c570,34593,1733175063928}] 2024-12-02T21:31:20,234 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 2024-12-02T21:31:20,234 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => d725ced47985accfdccea3024939292e, NAME => 'TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-02T21:31:20,235 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling d725ced47985accfdccea3024939292e 2024-12-02T21:31:20,235 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:20,235 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for d725ced47985accfdccea3024939292e 2024-12-02T21:31:20,235 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for d725ced47985accfdccea3024939292e 2024-12-02T21:31:20,238 INFO [StoreOpener-d725ced47985accfdccea3024939292e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d725ced47985accfdccea3024939292e 2024-12-02T21:31:20,239 INFO [StoreOpener-d725ced47985accfdccea3024939292e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d725ced47985accfdccea3024939292e columnFamilyName info 2024-12-02T21:31:20,239 DEBUG [StoreOpener-d725ced47985accfdccea3024939292e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:20,252 DEBUG [StoreOpener-d725ced47985accfdccea3024939292e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-3d976e91367d4e62bfeae47b85ccddf0 2024-12-02T21:31:20,256 DEBUG [StoreOpener-d725ced47985accfdccea3024939292e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-4515bdbac98a45f3a533fcbb892d5f3d 2024-12-02T21:31:20,265 DEBUG [StoreOpener-d725ced47985accfdccea3024939292e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2->hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6971c7b12714adabba25079dbd8fdf8-top 2024-12-02T21:31:20,266 INFO [StoreOpener-d725ced47985accfdccea3024939292e-1 {}] regionserver.HStore(327): Store=d725ced47985accfdccea3024939292e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:31:20,266 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for d725ced47985accfdccea3024939292e 2024-12-02T21:31:20,267 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e 2024-12-02T21:31:20,268 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e 2024-12-02T21:31:20,268 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for d725ced47985accfdccea3024939292e 2024-12-02T21:31:20,269 DEBUG 
[RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for d725ced47985accfdccea3024939292e 2024-12-02T21:31:20,270 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for d725ced47985accfdccea3024939292e 2024-12-02T21:31:20,271 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened d725ced47985accfdccea3024939292e; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717889, jitterRate=-0.08715754747390747}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:31:20,271 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d725ced47985accfdccea3024939292e 2024-12-02T21:31:20,271 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for d725ced47985accfdccea3024939292e: Running coprocessor pre-open hook at 1733175080235Writing region info on filesystem at 1733175080235Initializing all the Stores at 1733175080237 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175080237Cleaning up temporary data from old regions at 1733175080269 (+32 ms)Running coprocessor post-open hooks at 1733175080271 (+2 ms)Region opened successfully at 1733175080271 2024-12-02T21:31:20,272 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., pid=13, masterSystemTime=1733175080225 2024-12-02T21:31:20,272 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store d725ced47985accfdccea3024939292e:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:31:20,272 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:20,272 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:31:20,273 INFO [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 
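The split decision logged earlier ("Should split because region size is big enough sumSize=86.3 K, sizeToCheck=16.0 K" with regionsWithCommonTable=1) lines up with the policy parameters printed in the region-open line above (initialSize=16384, desiredMaxFileSize=717889). The following is a small sketch of that comparison using numbers taken from the log; it assumes the cubic region-count scaling commonly attributed to IncreasingToUpperBoundRegionSplitPolicy and is not HBase source.

public class SplitSizeCheckSketch {
  public static void main(String[] args) {
    long initialSize = 16_384L;          // initialSize printed in the region-open line above
    long desiredMaxFileSize = 717_889L;  // desiredMaxFileSize printed in the same line
    int regionsWithCommonTable = 1;      // from the IncreasingToUpperBoundRegionSplitPolicy debug line

    // Assumed scaling: initialSize * regionCount^3, capped at desiredMaxFileSize.
    long sizeToCheck = Math.min(desiredMaxFileSize,
        initialSize * regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable);

    long sumSize = 88_408L;              // total store size after compaction (~86.3 K in the log)
    boolean shouldSplit = sumSize > sizeToCheck;

    System.out.printf("sizeToCheck=%d (%.1f K), sumSize=%d (%.1f K), shouldSplit=%b%n",
        sizeToCheck, sizeToCheck / 1024.0, sumSize, sumSize / 1024.0, shouldSplit);
  }
}

With a single region of the table on the server, sizeToCheck stays at the 16 K initial size, so the 86.3 K store trips the check and the split request for parent 4969b595c3fd7103c37e1a4903c80ee2 seen above is queued.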
2024-12-02T21:31:20,273 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HStore(1541): d725ced47985accfdccea3024939292e/info is initiating minor compaction (all files) 2024-12-02T21:31:20,273 INFO [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d725ced47985accfdccea3024939292e/info in TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 2024-12-02T21:31:20,274 INFO [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2->hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6971c7b12714adabba25079dbd8fdf8-top, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-3d976e91367d4e62bfeae47b85ccddf0, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-4515bdbac98a45f3a533fcbb892d5f3d] into tmpdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp, totalSize=116.0 K 2024-12-02T21:31:20,274 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 2024-12-02T21:31:20,274 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 2024-12-02T21:31:20,274 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c. 
2024-12-02T21:31:20,274 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => cc8225a2b44932338372e937911a920c, NAME => 'TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-02T21:31:20,274 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] compactions.Compactor(225): Compacting a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733175075104 2024-12-02T21:31:20,275 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling cc8225a2b44932338372e937911a920c 2024-12-02T21:31:20,275 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:20,275 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=d725ced47985accfdccea3024939292e, regionState=OPEN, openSeqNum=131, regionLocation=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:20,275 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for cc8225a2b44932338372e937911a920c 2024-12-02T21:31:20,275 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-3d976e91367d4e62bfeae47b85ccddf0, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733175079285 2024-12-02T21:31:20,275 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for cc8225a2b44932338372e937911a920c 2024-12-02T21:31:20,275 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-4515bdbac98a45f3a533fcbb892d5f3d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733175079311 2024-12-02T21:31:20,276 INFO [StoreOpener-cc8225a2b44932338372e937911a920c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cc8225a2b44932338372e937911a920c 2024-12-02T21:31:20,276 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-02T21:31:20,277 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-02T21:31:20,277 INFO [StoreOpener-cc8225a2b44932338372e937911a920c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc8225a2b44932338372e937911a920c columnFamilyName info 2024-12-02T21:31:20,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-12-02T21:31:20,277 DEBUG [StoreOpener-cc8225a2b44932338372e937911a920c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:20,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure d725ced47985accfdccea3024939292e, server=87c3fdb6c570,34593,1733175063928 because future has completed 2024-12-02T21:31:20,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-12-02T21:31:20,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure d725ced47985accfdccea3024939292e, server=87c3fdb6c570,34593,1733175063928 in 204 msec 2024-12-02T21:31:20,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d725ced47985accfdccea3024939292e, ASSIGN in 366 msec 2024-12-02T21:31:20,286 DEBUG [StoreOpener-cc8225a2b44932338372e937911a920c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2->hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6971c7b12714adabba25079dbd8fdf8-bottom 2024-12-02T21:31:20,286 INFO [StoreOpener-cc8225a2b44932338372e937911a920c-1 {}] regionserver.HStore(327): Store=cc8225a2b44932338372e937911a920c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:31:20,287 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for cc8225a2b44932338372e937911a920c 2024-12-02T21:31:20,288 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c 2024-12-02T21:31:20,292 
DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c 2024-12-02T21:31:20,293 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for cc8225a2b44932338372e937911a920c 2024-12-02T21:31:20,293 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for cc8225a2b44932338372e937911a920c 2024-12-02T21:31:20,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/info/510bacee9dfd46da8a3ec7eb50c00e65 is 193, key is TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e./info:regioninfo/1733175080275/Put/seqid=0 2024-12-02T21:31:20,295 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for cc8225a2b44932338372e937911a920c 2024-12-02T21:31:20,296 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened cc8225a2b44932338372e937911a920c; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799095, jitterRate=0.016103044152259827}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:31:20,296 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cc8225a2b44932338372e937911a920c 2024-12-02T21:31:20,296 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for cc8225a2b44932338372e937911a920c: Running coprocessor pre-open hook at 1733175080275Writing region info on filesystem at 1733175080275Initializing all the Stores at 1733175080275Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175080276 (+1 ms)Cleaning up temporary data from old regions at 1733175080293 (+17 ms)Running coprocessor post-open hooks at 1733175080296 (+3 ms)Region opened successfully at 1733175080296 2024-12-02T21:31:20,297 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c., pid=12, masterSystemTime=1733175080225 2024-12-02T21:31:20,297 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store cc8225a2b44932338372e937911a920c:info, priority=-2147483648, current under compaction store size is 2 2024-12-02T21:31:20,297 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:20,297 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-02T21:31:20,298 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c. 2024-12-02T21:31:20,298 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1541): cc8225a2b44932338372e937911a920c/info is initiating minor compaction (all files) 2024-12-02T21:31:20,298 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of cc8225a2b44932338372e937911a920c/info in TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c. 2024-12-02T21:31:20,298 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2->hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6971c7b12714adabba25079dbd8fdf8-bottom] into tmpdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/.tmp, totalSize=86.3 K 2024-12-02T21:31:20,299 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733175075104 2024-12-02T21:31:20,299 DEBUG [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c. 2024-12-02T21:31:20,300 INFO [RS_OPEN_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c. 2024-12-02T21:31:20,301 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=cc8225a2b44932338372e937911a920c, regionState=OPEN, openSeqNum=131, regionLocation=87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:20,301 INFO [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d725ced47985accfdccea3024939292e#info#compaction#68 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:31:20,301 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/611b47174d624f8bb1d5792f143f70bd is 1080, key is row0062/info:/1733175077244/Put/seqid=0 2024-12-02T21:31:20,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure cc8225a2b44932338372e937911a920c, server=87c3fdb6c570,34593,1733175063928 because future has completed 2024-12-02T21:31:20,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741853_1029 (size=9882) 2024-12-02T21:31:20,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741853_1029 (size=9882) 2024-12-02T21:31:20,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-02T21:31:20,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure cc8225a2b44932338372e937911a920c, server=87c3fdb6c570,34593,1733175063928 in 233 msec 2024-12-02T21:31:20,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741854_1030 (size=42984) 2024-12-02T21:31:20,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/info/510bacee9dfd46da8a3ec7eb50c00e65 2024-12-02T21:31:20,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741854_1030 (size=42984) 2024-12-02T21:31:20,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-12-02T21:31:20,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cc8225a2b44932338372e937911a920c, ASSIGN in 391 msec 2024-12-02T21:31:20,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4969b595c3fd7103c37e1a4903c80ee2, daughterA=cc8225a2b44932338372e937911a920c, daughterB=d725ced47985accfdccea3024939292e in 954 msec 2024-12-02T21:31:20,315 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/611b47174d624f8bb1d5792f143f70bd as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/611b47174d624f8bb1d5792f143f70bd 2024-12-02T21:31:20,320 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
cc8225a2b44932338372e937911a920c#info#compaction#69 average throughput is 62.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:31:20,321 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/.tmp/info/e8c0a4db43c24d66a77f5c71f44ca1bf is 1080, key is row0001/info:/1733175075104/Put/seqid=0 2024-12-02T21:31:20,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/ns/f2a6515b269a4510ac6e7cd5f057181c is 43, key is default/ns:d/1733175064970/Put/seqid=0 2024-12-02T21:31:20,328 INFO [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d725ced47985accfdccea3024939292e/info of d725ced47985accfdccea3024939292e into 611b47174d624f8bb1d5792f143f70bd(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:31:20,328 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:20,328 INFO [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., storeName=d725ced47985accfdccea3024939292e/info, priority=13, startTime=1733175080272; duration=0sec 2024-12-02T21:31:20,328 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:20,328 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d725ced47985accfdccea3024939292e:info 2024-12-02T21:31:20,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741855_1031 (size=70862) 2024-12-02T21:31:20,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741855_1031 (size=70862) 2024-12-02T21:31:20,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741856_1032 (size=5153) 2024-12-02T21:31:20,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741856_1032 (size=5153) 2024-12-02T21:31:20,332 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/ns/f2a6515b269a4510ac6e7cd5f057181c 2024-12-02T21:31:20,335 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/.tmp/info/e8c0a4db43c24d66a77f5c71f44ca1bf as 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/info/e8c0a4db43c24d66a77f5c71f44ca1bf 2024-12-02T21:31:20,341 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in cc8225a2b44932338372e937911a920c/info of cc8225a2b44932338372e937911a920c into e8c0a4db43c24d66a77f5c71f44ca1bf(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:31:20,341 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for cc8225a2b44932338372e937911a920c: 2024-12-02T21:31:20,341 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c., storeName=cc8225a2b44932338372e937911a920c/info, priority=15, startTime=1733175080297; duration=0sec 2024-12-02T21:31:20,341 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:20,341 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc8225a2b44932338372e937911a920c:info 2024-12-02T21:31:20,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/table/b86845bf9e7b4daea13074df43b112de is 65, key is TestLogRolling-testLogRolling/table:state/1733175065451/Put/seqid=0 2024-12-02T21:31:20,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741857_1033 (size=5340) 2024-12-02T21:31:20,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741857_1033 (size=5340) 2024-12-02T21:31:20,359 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/table/b86845bf9e7b4daea13074df43b112de 2024-12-02T21:31:20,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/info/510bacee9dfd46da8a3ec7eb50c00e65 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/info/510bacee9dfd46da8a3ec7eb50c00e65 2024-12-02T21:31:20,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/info/510bacee9dfd46da8a3ec7eb50c00e65, entries=30, sequenceid=17, filesize=9.7 K 2024-12-02T21:31:20,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/ns/f2a6515b269a4510ac6e7cd5f057181c as 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/ns/f2a6515b269a4510ac6e7cd5f057181c 2024-12-02T21:31:20,376 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/ns/f2a6515b269a4510ac6e7cd5f057181c, entries=2, sequenceid=17, filesize=5.0 K 2024-12-02T21:31:20,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/table/b86845bf9e7b4daea13074df43b112de as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/table/b86845bf9e7b4daea13074df43b112de 2024-12-02T21:31:20,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/table/b86845bf9e7b4daea13074df43b112de, entries=2, sequenceid=17, filesize=5.2 K 2024-12-02T21:31:20,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 106ms, sequenceid=17, compaction requested=false 2024-12-02T21:31:20,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-02T21:31:20,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:20,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:21,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:57576 deadline: 1733175091328, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. 
is not online on 87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:21,356 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2., hostname=87c3fdb6c570,34593,1733175063928, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2., hostname=87c3fdb6c570,34593,1733175063928, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. is not online on 87c3fdb6c570,34593,1733175063928 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T21:31:21,357 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2., hostname=87c3fdb6c570,34593,1733175063928, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2. is not online on 87c3fdb6c570,34593,1733175063928 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T21:31:21,357 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733175065066.4969b595c3fd7103c37e1a4903c80ee2., hostname=87c3fdb6c570,34593,1733175063928, seqNum=2 from cache 2024-12-02T21:31:21,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
[identical java.lang.reflect.InvocationTargetException / "java.io.IOException: Filesystem closed" stack traces elided: the Close-WAL-Writer-0 util.RecoverLeaseFSUtils(258) "Failed invocation" WARN above recurs verbatim for both WAL files (87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta and 87c3fdb6c570%2C33183%2C1733174923193.1733174923426) at 21:31:21,531, 21:31:22,532, 21:31:23,534 and 21:31:24,535]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:24,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:24,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,335 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:31:25,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,338 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,338 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,359 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,359 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:25,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-02T21:31:25,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:26,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:26,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:27,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:27,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:28,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:28,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:29,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:29,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:30,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:30,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:31,414 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., hostname=87c3fdb6c570,34593,1733175063928, seqNum=131]
2024-12-02T21:31:31,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e
2024-12-02T21:31:31,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-02T21:31:31,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/93b2234fd8154620bc6a1a7a27cb88fe is 1080, key is row0097/info:/1733175091415/Put/seqid=0
2024-12-02T21:31:31,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741858_1034 (size=12516)
2024-12-02T21:31:31,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741858_1034 (size=12516)
2024-12-02T21:31:31,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/93b2234fd8154620bc6a1a7a27cb88fe
2024-12-02T21:31:31,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/93b2234fd8154620bc6a1a7a27cb88fe as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/93b2234fd8154620bc6a1a7a27cb88fe
2024-12-02T21:31:31,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/93b2234fd8154620bc6a1a7a27cb88fe, entries=7, sequenceid=141, filesize=12.2 K
2024-12-02T21:31:31,451 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for d725ced47985accfdccea3024939292e in 23ms, sequenceid=141, compaction requested=false
2024-12-02T21:31:31,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e:
2024-12-02T21:31:31,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e
2024-12-02T21:31:31,452 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-12-02T21:31:31,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/962cd0bdacab406d91fc126944a06ad2 is 1080, key is row0104/info:/1733175091428/Put/seqid=0
2024-12-02T21:31:31,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741859_1035 (size=19000)
2024-12-02T21:31:31,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741859_1035 (size=19000)
2024-12-02T21:31:31,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/962cd0bdacab406d91fc126944a06ad2
2024-12-02T21:31:31,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/962cd0bdacab406d91fc126944a06ad2 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/962cd0bdacab406d91fc126944a06ad2
2024-12-02T21:31:31,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/962cd0bdacab406d91fc126944a06ad2, entries=13, sequenceid=157, filesize=18.6 K
2024-12-02T21:31:31,488 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for d725ced47985accfdccea3024939292e in 36ms, sequenceid=157, compaction requested=true
2024-12-02T21:31:31,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e:
2024-12-02T21:31:31,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d725ced47985accfdccea3024939292e:info, priority=-2147483648, current under compaction store size is 1
2024-12-02T21:31:31,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T21:31:31,488 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-02T21:31:31,489 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74500 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-02T21:31:31,489 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HStore(1541): d725ced47985accfdccea3024939292e/info is initiating minor compaction (all files)
2024-12-02T21:31:31,489 INFO [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d725ced47985accfdccea3024939292e/info in TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.
2024-12-02T21:31:31,490 INFO [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/611b47174d624f8bb1d5792f143f70bd, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/93b2234fd8154620bc6a1a7a27cb88fe, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/962cd0bdacab406d91fc126944a06ad2] into tmpdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp, totalSize=72.8 K
2024-12-02T21:31:31,490 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] compactions.Compactor(225): Compacting 611b47174d624f8bb1d5792f143f70bd, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733175077244
2024-12-02T21:31:31,490 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] compactions.Compactor(225): Compacting 93b2234fd8154620bc6a1a7a27cb88fe, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733175091415
2024-12-02T21:31:31,490 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] compactions.Compactor(225): Compacting 962cd0bdacab406d91fc126944a06ad2, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733175091428
2024-12-02T21:31:31,499 INFO [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d725ced47985accfdccea3024939292e#info#compaction#74 average throughput is 56.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-02T21:31:31,499 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/4a41070733ce449b8e3f98b9401ca346 is 1080, key is row0062/info:/1733175077244/Put/seqid=0
2024-12-02T21:31:31,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741860_1036 (size=64714)
2024-12-02T21:31:31,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741860_1036 (size=64714)
2024-12-02T21:31:31,509 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/4a41070733ce449b8e3f98b9401ca346 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/4a41070733ce449b8e3f98b9401ca346
2024-12-02T21:31:31,515 INFO [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d725ced47985accfdccea3024939292e/info of d725ced47985accfdccea3024939292e into 4a41070733ce449b8e3f98b9401ca346(size=63.2 K), total size for store is 63.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-02T21:31:31,515 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d725ced47985accfdccea3024939292e:
2024-12-02T21:31:31,515 INFO [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., storeName=d725ced47985accfdccea3024939292e/info, priority=13, startTime=1733175091488; duration=0sec
2024-12-02T21:31:31,515 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T21:31:31,515 DEBUG [RS:0;87c3fdb6c570:34593-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d725ced47985accfdccea3024939292e:info
2024-12-02T21:31:31,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:31,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:32,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:32,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:33,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e
2024-12-02T21:31:33,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-12-02T21:31:33,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d725ced47985accfdccea3024939292e, server=87c3fdb6c570,34593,1733175063928
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-02T21:31:33,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:57576 deadline: 1733175103508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d725ced47985accfdccea3024939292e, server=87c3fdb6c570,34593,1733175063928
2024-12-02T21:31:33,510 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., hostname=87c3fdb6c570,34593,1733175063928, seqNum=131 , the old value is region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., hostname=87c3fdb6c570,34593,1733175063928, seqNum=131, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d725ced47985accfdccea3024939292e, server=87c3fdb6c570,34593,1733175063928
2024-12-02T21:31:33,510 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., hostname=87c3fdb6c570,34593,1733175063928, seqNum=131 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d725ced47985accfdccea3024939292e, server=87c3fdb6c570,34593,1733175063928
2024-12-02T21:31:33,510 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., hostname=87c3fdb6c570,34593,1733175063928, seqNum=131 because the exception is null or not the one we care about
2024-12-02T21:31:33,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/33dfcde2e9f04f88a73f1c59b060ac59 is 1080, key is row0117/info:/1733175091453/Put/seqid=0
2024-12-02T21:31:33,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741861_1037 (size=19000)
2024-12-02T21:31:33,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741861_1037 (size=19000)
2024-12-02T21:31:33,526 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/33dfcde2e9f04f88a73f1c59b060ac59
2024-12-02T21:31:33,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/33dfcde2e9f04f88a73f1c59b060ac59 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/33dfcde2e9f04f88a73f1c59b060ac59
2024-12-02T21:31:33,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/33dfcde2e9f04f88a73f1c59b060ac59, entries=13, sequenceid=174, filesize=18.6 K
2024-12-02T21:31:33,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for d725ced47985accfdccea3024939292e in 61ms, sequenceid=174, compaction requested=false
2024-12-02T21:31:33,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e:
2024-12-02T21:31:33,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-02T21:31:33,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:33,666 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-12-02T21:31:34,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:34,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:35,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:35,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:36,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:36,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:37,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:37,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:38,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:38,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:39,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:39,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:40,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:40,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:41,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:41,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:42,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:42,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:43,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:43,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:43,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e 2024-12-02T21:31:43,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-12-02T21:31:43,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/a64f38687853482290f7b84bfdba6c02 is 1080, key is row0130/info:/1733175093479/Put/seqid=0 2024-12-02T21:31:43,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741862_1038 (size=23316) 2024-12-02T21:31:43,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741862_1038 (size=23316) 2024-12-02T21:31:43,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/a64f38687853482290f7b84bfdba6c02 2024-12-02T21:31:43,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/a64f38687853482290f7b84bfdba6c02 as 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a64f38687853482290f7b84bfdba6c02 2024-12-02T21:31:43,634 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a64f38687853482290f7b84bfdba6c02, entries=17, sequenceid=194, filesize=22.8 K 2024-12-02T21:31:43,635 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=12.61 KB/12912 for d725ced47985accfdccea3024939292e in 30ms, sequenceid=194, compaction requested=true 2024-12-02T21:31:43,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:43,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d725ced47985accfdccea3024939292e:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:31:43,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:43,635 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:31:43,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e 2024-12-02T21:31:43,637 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 107030 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:31:43,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-02T21:31:43,637 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1541): d725ced47985accfdccea3024939292e/info is initiating minor compaction (all files) 2024-12-02T21:31:43,637 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d725ced47985accfdccea3024939292e/info in TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 
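[Editor's note] The entries above trace a complete flush-then-compact cycle for region d725ced47985accfdccea3024939292e: the memstore is written to a .tmp HFile, committed into the info store, and the ExploringCompactionPolicy immediately selects three store files for a minor compaction. As a rough illustration of how a client can drive the same cycle, the sketch below writes ~1 KB cells (the log reports the biggest cell as 1080 bytes) and then requests a flush and a compaction through the standard Connection/Admin/Table API. The table name and column family come from the log; the qualifier, row-key format, write count, and the explicit flush/compact calls are illustrative assumptions, not what the test itself does.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushCompactSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling"); // table seen in the log
        byte[] family = Bytes.toBytes("info");                                // column family seen in the log

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table t = conn.getTable(table);
             Admin admin = conn.getAdmin()) {
          // Write a batch of ~1 KB cells; with a small memstore flush size this
          // yields several small HFiles like 33dfcde2... and a64f3868... above.
          byte[] value = new byte[1024];
          for (int i = 0; i < 200; i++) {
            t.put(new Put(Bytes.toBytes(String.format("row%04d", i)))
                .addColumn(family, Bytes.toBytes("q"), value));
          }
          // Force the memstore to disk, then request a compaction; the region
          // server decides whether it runs as a minor or major compaction.
          admin.flush(table);
          admin.compact(table);
        }
      }
    }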
2024-12-02T21:31:43,637 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/4a41070733ce449b8e3f98b9401ca346, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/33dfcde2e9f04f88a73f1c59b060ac59, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a64f38687853482290f7b84bfdba6c02] into tmpdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp, totalSize=104.5 K 2024-12-02T21:31:43,638 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4a41070733ce449b8e3f98b9401ca346, keycount=55, bloomtype=ROW, size=63.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733175077244 2024-12-02T21:31:43,638 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 33dfcde2e9f04f88a73f1c59b060ac59, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733175091453 2024-12-02T21:31:43,638 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting a64f38687853482290f7b84bfdba6c02, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733175093479 2024-12-02T21:31:43,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/a8e444519a8c413cafbe54cff22f2f35 is 1080, key is row0147/info:/1733175103609/Put/seqid=0 2024-12-02T21:31:43,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741863_1039 (size=19000) 2024-12-02T21:31:43,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741863_1039 (size=19000) 2024-12-02T21:31:43,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/a8e444519a8c413cafbe54cff22f2f35 2024-12-02T21:31:43,649 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d725ced47985accfdccea3024939292e#info#compaction#78 average throughput is 43.61 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:31:43,650 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/9f6f0358e0c143c387930ceb4c38280d is 1080, key is row0062/info:/1733175077244/Put/seqid=0 2024-12-02T21:31:43,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/a8e444519a8c413cafbe54cff22f2f35 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a8e444519a8c413cafbe54cff22f2f35 2024-12-02T21:31:43,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741864_1040 (size=97233) 2024-12-02T21:31:43,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741864_1040 (size=97233) 2024-12-02T21:31:43,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a8e444519a8c413cafbe54cff22f2f35, entries=13, sequenceid=210, filesize=18.6 K 2024-12-02T21:31:43,658 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for d725ced47985accfdccea3024939292e in 22ms, sequenceid=210, compaction requested=false 2024-12-02T21:31:43,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:43,659 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/9f6f0358e0c143c387930ceb4c38280d as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/9f6f0358e0c143c387930ceb4c38280d 2024-12-02T21:31:43,664 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d725ced47985accfdccea3024939292e/info of d725ced47985accfdccea3024939292e into 9f6f0358e0c143c387930ceb4c38280d(size=95.0 K), total size for store is 113.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
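[Editor's note] The repeated "Failed invocation" WARN traces throughout this section report java.lang.reflect.InvocationTargetException: null with the real failure, java.io.IOException: Filesystem closed, nested as the cause. The GeneratedMethodAccessor/Method.invoke frames show that RecoverLeaseFSUtils calls DistributedFileSystem.isFileClosed through reflection, and Method.invoke always wraps whatever the target method throws in an InvocationTargetException whose own message is null. The standalone sketch below reproduces that wrapping and the usual getCause() unwrap; it is not HBase or Hadoop code, and the ClosedClient class is invented purely for the demonstration.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Standalone demonstration of why a reflective call surfaces as
// "InvocationTargetException: null" with the real exception as the cause.
public class ReflectionWrapSketch {

  // Stand-in for a client whose underlying filesystem has been closed.
  public static class ClosedClient {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    ClosedClient client = new ClosedClient();
    Method m = ClosedClient.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(client, "/some/wal/file");
    } catch (InvocationTargetException e) {
      // e.getMessage() is null, matching "InvocationTargetException: null"
      // in the log; the informative part is the nested cause.
      System.out.println("wrapper message: " + e.getMessage());
      System.out.println("real cause: " + e.getCause());
    }
  }
}

In the traces above, the wrapped cause comes from DFSClient.checkOpen, which throws IOException("Filesystem closed") once the client has been shut down, so each retry of the Close-WAL-Writer task fails the same way until the recovery loop gives up.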
2024-12-02T21:31:43,664 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:43,664 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., storeName=d725ced47985accfdccea3024939292e/info, priority=13, startTime=1733175103635; duration=0sec 2024-12-02T21:31:43,664 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:43,664 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d725ced47985accfdccea3024939292e:info 2024-12-02T21:31:44,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:31:44,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:44,985 INFO [master/87c3fdb6c570:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T21:31:44,986 INFO [master/87c3fdb6c570:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T21:31:45,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:45,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:45,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e 2024-12-02T21:31:45,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:31:45,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/9f83ccc83cff446eb1b0490cd637b90f is 1080, key is row0160/info:/1733175103638/Put/seqid=0 2024-12-02T21:31:45,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741865_1041 (size=12516) 2024-12-02T21:31:45,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741865_1041 (size=12516) 2024-12-02T21:31:45,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/9f83ccc83cff446eb1b0490cd637b90f 2024-12-02T21:31:45,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/9f83ccc83cff446eb1b0490cd637b90f as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/9f83ccc83cff446eb1b0490cd637b90f 2024-12-02T21:31:45,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/9f83ccc83cff446eb1b0490cd637b90f, entries=7, sequenceid=221, filesize=12.2 K 2024-12-02T21:31:45,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for d725ced47985accfdccea3024939292e in 35ms, sequenceid=221, compaction requested=true 2024-12-02T21:31:45,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:45,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d725ced47985accfdccea3024939292e:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:31:45,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:45,692 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:31:45,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e 2024-12-02T21:31:45,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-12-02T21:31:45,693 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128749 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:31:45,693 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1541): d725ced47985accfdccea3024939292e/info is initiating minor compaction (all files) 2024-12-02T21:31:45,693 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d725ced47985accfdccea3024939292e/info in TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 2024-12-02T21:31:45,693 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/9f6f0358e0c143c387930ceb4c38280d, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a8e444519a8c413cafbe54cff22f2f35, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/9f83ccc83cff446eb1b0490cd637b90f] into tmpdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp, totalSize=125.7 K 2024-12-02T21:31:45,694 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f6f0358e0c143c387930ceb4c38280d, keycount=85, bloomtype=ROW, size=95.0 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733175077244 2024-12-02T21:31:45,694 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting a8e444519a8c413cafbe54cff22f2f35, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733175103609 2024-12-02T21:31:45,695 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f83ccc83cff446eb1b0490cd637b90f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733175103638 2024-12-02T21:31:45,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/20c766714ded4a248dff2e0d4f3e7bdf is 1080, key is row0167/info:/1733175105657/Put/seqid=0 2024-12-02T21:31:45,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to 
blk_1073741866_1042 (size=27628) 2024-12-02T21:31:45,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741866_1042 (size=27628) 2024-12-02T21:31:45,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/20c766714ded4a248dff2e0d4f3e7bdf 2024-12-02T21:31:45,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/20c766714ded4a248dff2e0d4f3e7bdf as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/20c766714ded4a248dff2e0d4f3e7bdf 2024-12-02T21:31:45,707 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d725ced47985accfdccea3024939292e#info#compaction#81 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:31:45,708 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/7a124b5df6754b41a7497d21d0bca255 is 1080, key is row0062/info:/1733175077244/Put/seqid=0 2024-12-02T21:31:45,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741867_1043 (size=118899) 2024-12-02T21:31:45,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741867_1043 (size=118899) 2024-12-02T21:31:45,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/20c766714ded4a248dff2e0d4f3e7bdf, entries=21, sequenceid=245, filesize=27.0 K 2024-12-02T21:31:45,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=5.25 KB/5380 for d725ced47985accfdccea3024939292e in 20ms, sequenceid=245, compaction requested=false 2024-12-02T21:31:45,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:45,716 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/7a124b5df6754b41a7497d21d0bca255 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/7a124b5df6754b41a7497d21d0bca255 2024-12-02T21:31:45,722 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction 
of 3 (all) file(s) in d725ced47985accfdccea3024939292e/info of d725ced47985accfdccea3024939292e into 7a124b5df6754b41a7497d21d0bca255(size=116.1 K), total size for store is 143.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:31:45,722 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:45,722 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., storeName=d725ced47985accfdccea3024939292e/info, priority=13, startTime=1733175105692; duration=0sec 2024-12-02T21:31:45,722 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:45,722 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d725ced47985accfdccea3024939292e:info 2024-12-02T21:31:46,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:31:46,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:47,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:47,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:31:47,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e 2024-12-02T21:31:47,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:31:47,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/4689ca1957964eb783d91510cb5b6518 is 1080, key is row0188/info:/1733175105693/Put/seqid=0 2024-12-02T21:31:47,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741868_1044 (size=12516) 2024-12-02T21:31:47,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741868_1044 (size=12516) 2024-12-02T21:31:47,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/4689ca1957964eb783d91510cb5b6518 2024-12-02T21:31:47,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/4689ca1957964eb783d91510cb5b6518 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/4689ca1957964eb783d91510cb5b6518 2024-12-02T21:31:47,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/4689ca1957964eb783d91510cb5b6518, entries=7, sequenceid=256, filesize=12.2 K 2024-12-02T21:31:47,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for d725ced47985accfdccea3024939292e in 25ms, sequenceid=256, compaction requested=true 2024-12-02T21:31:47,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:47,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d725ced47985accfdccea3024939292e:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:31:47,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:47,735 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:31:47,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e 2024-12-02T21:31:47,735 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-02T21:31:47,736 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 159043 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:31:47,736 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1541): d725ced47985accfdccea3024939292e/info is initiating minor compaction (all files) 2024-12-02T21:31:47,736 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d725ced47985accfdccea3024939292e/info in TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 2024-12-02T21:31:47,736 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/7a124b5df6754b41a7497d21d0bca255, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/20c766714ded4a248dff2e0d4f3e7bdf, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/4689ca1957964eb783d91510cb5b6518] into tmpdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp, totalSize=155.3 K 2024-12-02T21:31:47,737 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7a124b5df6754b41a7497d21d0bca255, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733175077244 2024-12-02T21:31:47,737 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 20c766714ded4a248dff2e0d4f3e7bdf, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733175105657 2024-12-02T21:31:47,737 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4689ca1957964eb783d91510cb5b6518, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733175105693 2024-12-02T21:31:47,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/7c0641a4c59a480d8a5f2dfd274427fc is 1080, key is row0195/info:/1733175107711/Put/seqid=0 2024-12-02T21:31:47,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741869_1045 (size=17918) 2024-12-02T21:31:47,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741869_1045 (size=17918) 2024-12-02T21:31:47,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=271 (bloomFilter=true), 
to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/7c0641a4c59a480d8a5f2dfd274427fc 2024-12-02T21:31:47,751 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d725ced47985accfdccea3024939292e#info#compaction#84 average throughput is 34.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:31:47,752 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/ebc3099e182146ae81707f3239808485 is 1080, key is row0062/info:/1733175077244/Put/seqid=0 2024-12-02T21:31:47,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/7c0641a4c59a480d8a5f2dfd274427fc as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/7c0641a4c59a480d8a5f2dfd274427fc 2024-12-02T21:31:47,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741870_1046 (size=149390) 2024-12-02T21:31:47,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741870_1046 (size=149390) 2024-12-02T21:31:47,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/7c0641a4c59a480d8a5f2dfd274427fc, entries=12, sequenceid=271, filesize=17.5 K 2024-12-02T21:31:47,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for d725ced47985accfdccea3024939292e in 27ms, sequenceid=271, compaction requested=false 2024-12-02T21:31:47,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:47,763 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/ebc3099e182146ae81707f3239808485 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/ebc3099e182146ae81707f3239808485 2024-12-02T21:31:47,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e 2024-12-02T21:31:47,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-02T21:31:47,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/94d1c1477f834378b1160501334c6991 is 1080, key is row0207/info:/1733175107736/Put/seqid=0 2024-12-02T21:31:47,771 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d725ced47985accfdccea3024939292e/info of d725ced47985accfdccea3024939292e into ebc3099e182146ae81707f3239808485(size=145.9 K), total size for store is 163.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:31:47,771 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:47,771 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., storeName=d725ced47985accfdccea3024939292e/info, priority=13, startTime=1733175107735; duration=0sec 2024-12-02T21:31:47,771 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:47,771 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d725ced47985accfdccea3024939292e:info 2024-12-02T21:31:47,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741871_1047 (size=17918) 2024-12-02T21:31:47,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741871_1047 (size=17918) 2024-12-02T21:31:47,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/94d1c1477f834378b1160501334c6991 2024-12-02T21:31:47,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/94d1c1477f834378b1160501334c6991 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/94d1c1477f834378b1160501334c6991 2024-12-02T21:31:47,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/94d1c1477f834378b1160501334c6991, entries=12, sequenceid=286, filesize=17.5 K 2024-12-02T21:31:47,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=6.30 KB/6456 for d725ced47985accfdccea3024939292e in 20ms, sequenceid=286, compaction requested=true 2024-12-02T21:31:47,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:47,784 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d725ced47985accfdccea3024939292e:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:31:47,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:47,784 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:31:47,785 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 185226 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:31:47,785 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1541): d725ced47985accfdccea3024939292e/info is initiating minor compaction (all files) 2024-12-02T21:31:47,785 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d725ced47985accfdccea3024939292e/info in TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 2024-12-02T21:31:47,786 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/ebc3099e182146ae81707f3239808485, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/7c0641a4c59a480d8a5f2dfd274427fc, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/94d1c1477f834378b1160501334c6991] into tmpdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp, totalSize=180.9 K 2024-12-02T21:31:47,786 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting ebc3099e182146ae81707f3239808485, keycount=133, bloomtype=ROW, size=145.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733175077244 2024-12-02T21:31:47,786 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c0641a4c59a480d8a5f2dfd274427fc, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733175107711 2024-12-02T21:31:47,786 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 94d1c1477f834378b1160501334c6991, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733175107736 2024-12-02T21:31:47,799 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d725ced47985accfdccea3024939292e#info#compaction#86 average throughput is 40.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:31:47,799 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/e62126cc0da94383bc87336e3af7ab72 is 1080, key is row0062/info:/1733175077244/Put/seqid=0 2024-12-02T21:31:47,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741872_1048 (size=175396) 2024-12-02T21:31:47,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741872_1048 (size=175396) 2024-12-02T21:31:47,807 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/e62126cc0da94383bc87336e3af7ab72 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/e62126cc0da94383bc87336e3af7ab72 2024-12-02T21:31:47,813 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d725ced47985accfdccea3024939292e/info of d725ced47985accfdccea3024939292e into e62126cc0da94383bc87336e3af7ab72(size=171.3 K), total size for store is 171.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:31:47,813 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:47,813 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., storeName=d725ced47985accfdccea3024939292e/info, priority=13, startTime=1733175107784; duration=0sec 2024-12-02T21:31:47,813 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:47,813 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d725ced47985accfdccea3024939292e:info 2024-12-02T21:31:48,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:48,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:49,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:49,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e 2024-12-02T21:31:49,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:31:49,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/8c1996cb870f4fc1997bf113d3f63bfa is 1080, key is row0219/info:/1733175107765/Put/seqid=0 2024-12-02T21:31:49,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741873_1049 (size=12523) 2024-12-02T21:31:49,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741873_1049 (size=12523) 2024-12-02T21:31:49,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/8c1996cb870f4fc1997bf113d3f63bfa 2024-12-02T21:31:49,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/8c1996cb870f4fc1997bf113d3f63bfa as 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/8c1996cb870f4fc1997bf113d3f63bfa 2024-12-02T21:31:49,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/8c1996cb870f4fc1997bf113d3f63bfa, entries=7, sequenceid=298, filesize=12.2 K 2024-12-02T21:31:49,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for d725ced47985accfdccea3024939292e in 25ms, sequenceid=298, compaction requested=false 2024-12-02T21:31:49,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:49,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e 2024-12-02T21:31:49,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-02T21:31:49,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/b38a173c47664f70b331f5af226684a0 is 1080, key is row0226/info:/1733175109783/Put/seqid=0 2024-12-02T21:31:49,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741874_1050 (size=17918) 2024-12-02T21:31:49,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741874_1050 (size=17918) 2024-12-02T21:31:49,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/b38a173c47664f70b331f5af226684a0 2024-12-02T21:31:49,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/b38a173c47664f70b331f5af226684a0 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/b38a173c47664f70b331f5af226684a0 2024-12-02T21:31:49,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/b38a173c47664f70b331f5af226684a0, entries=12, sequenceid=313, filesize=17.5 K 2024-12-02T21:31:49,829 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for d725ced47985accfdccea3024939292e in 21ms, sequenceid=313, compaction requested=true 2024-12-02T21:31:49,829 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:49,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d725ced47985accfdccea3024939292e:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:31:49,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:49,829 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:31:49,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34593 {}] regionserver.HRegion(8855): Flush requested on d725ced47985accfdccea3024939292e 2024-12-02T21:31:49,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-02T21:31:49,830 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 205837 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:31:49,830 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1541): d725ced47985accfdccea3024939292e/info is initiating minor compaction (all files) 2024-12-02T21:31:49,831 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d725ced47985accfdccea3024939292e/info in TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 2024-12-02T21:31:49,831 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/e62126cc0da94383bc87336e3af7ab72, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/8c1996cb870f4fc1997bf113d3f63bfa, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/b38a173c47664f70b331f5af226684a0] into tmpdir=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp, totalSize=201.0 K 2024-12-02T21:31:49,831 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting e62126cc0da94383bc87336e3af7ab72, keycount=157, bloomtype=ROW, size=171.3 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733175077244 2024-12-02T21:31:49,832 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8c1996cb870f4fc1997bf113d3f63bfa, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733175107765 2024-12-02T21:31:49,832 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] compactions.Compactor(225): Compacting b38a173c47664f70b331f5af226684a0, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733175109783 2024-12-02T21:31:49,834 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/638fd25b3dd34fcba600468d631f7c06 is 1080, key is row0238/info:/1733175109810/Put/seqid=0 2024-12-02T21:31:49,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741875_1051 (size=16839) 2024-12-02T21:31:49,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741875_1051 (size=16839) 2024-12-02T21:31:49,842 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/638fd25b3dd34fcba600468d631f7c06 2024-12-02T21:31:49,845 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d725ced47985accfdccea3024939292e#info#compaction#90 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:31:49,846 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/bb70639979ad4d149ecde77c0a95395b is 1080, key is row0062/info:/1733175077244/Put/seqid=0 2024-12-02T21:31:49,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741876_1052 (size=196003) 2024-12-02T21:31:49,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741876_1052 (size=196003) 2024-12-02T21:31:49,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/638fd25b3dd34fcba600468d631f7c06 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/638fd25b3dd34fcba600468d631f7c06 2024-12-02T21:31:49,855 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/bb70639979ad4d149ecde77c0a95395b as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/bb70639979ad4d149ecde77c0a95395b 2024-12-02T21:31:49,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/638fd25b3dd34fcba600468d631f7c06, entries=11, sequenceid=327, filesize=16.4 K 2024-12-02T21:31:49,857 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for d725ced47985accfdccea3024939292e in 27ms, sequenceid=327, compaction requested=false 2024-12-02T21:31:49,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:49,860 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d725ced47985accfdccea3024939292e/info of d725ced47985accfdccea3024939292e into bb70639979ad4d149ecde77c0a95395b(size=191.4 K), total size for store is 207.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:31:49,860 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:49,860 INFO [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., storeName=d725ced47985accfdccea3024939292e/info, priority=13, startTime=1733175109829; duration=0sec 2024-12-02T21:31:49,860 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:31:49,860 DEBUG [RS:0;87c3fdb6c570:34593-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d725ced47985accfdccea3024939292e:info 2024-12-02T21:31:49,900 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-12-02T21:31:50,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:50,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:51,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:51,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:51,847 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-02T21:31:51,848 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C34593%2C1733175063928.1733175111848 2024-12-02T21:31:51,858 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:51,858 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:51,858 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:51,858 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:51,858 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:51,858 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/WALs/87c3fdb6c570,34593,1733175063928/87c3fdb6c570%2C34593%2C1733175063928.1733175064554 with entries=315, filesize=309.33 KB; new WAL /user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/WALs/87c3fdb6c570,34593,1733175063928/87c3fdb6c570%2C34593%2C1733175063928.1733175111848 2024-12-02T21:31:51,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741833_1009 (size=316763) 2024-12-02T21:31:51,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741833_1009 (size=316763) 2024-12-02T21:31:51,864 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45971:45971),(127.0.0.1/127.0.0.1:37697:37697)] 2024-12-02T21:31:51,867 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d725ced47985accfdccea3024939292e 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-02T21:31:51,871 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/f59569a99fc1496d92e36d3133e36457 is 1080, key is row0249/info:/1733175109831/Put/seqid=0 2024-12-02T21:31:51,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741878_1054 (size=13602) 2024-12-02T21:31:51,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741878_1054 (size=13602) 2024-12-02T21:31:51,875 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/f59569a99fc1496d92e36d3133e36457 2024-12-02T21:31:51,879 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/.tmp/info/f59569a99fc1496d92e36d3133e36457 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/f59569a99fc1496d92e36d3133e36457 2024-12-02T21:31:51,884 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/f59569a99fc1496d92e36d3133e36457, entries=8, sequenceid=339, filesize=13.3 K 2024-12-02T21:31:51,885 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for d725ced47985accfdccea3024939292e in 18ms, sequenceid=339, compaction requested=true 2024-12-02T21:31:51,885 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d725ced47985accfdccea3024939292e: 2024-12-02T21:31:51,885 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-12-02T21:31:51,889 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/info/bd47599ffa634a8bbf545731b5d494af is 186, key is TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c./info:regioninfo/1733175080300/Put/seqid=0 2024-12-02T21:31:51,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741879_1055 (size=6153) 2024-12-02T21:31:51,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741879_1055 (size=6153) 2024-12-02T21:31:51,893 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/info/bd47599ffa634a8bbf545731b5d494af 2024-12-02T21:31:51,898 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/.tmp/info/bd47599ffa634a8bbf545731b5d494af as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/info/bd47599ffa634a8bbf545731b5d494af 2024-12-02T21:31:51,902 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/info/bd47599ffa634a8bbf545731b5d494af, entries=5, sequenceid=21, filesize=6.0 K 2024-12-02T21:31:51,903 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 18ms, sequenceid=21, compaction requested=false 2024-12-02T21:31:51,903 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-02T21:31:51,904 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for cc8225a2b44932338372e937911a920c: 
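The recurring Close-WAL-Writer-0 warnings above come from RecoverLeaseFSUtils probing whether an old WAL file is already closed. The stack traces show the probe going through Method.invoke, so the real failure (java.io.IOException: Filesystem closed, i.e. the DFSClient behind that path has already been shut down) surfaces wrapped in an InvocationTargetException whose own message is null, which is exactly how the WARN lines print it. The sketch below only reproduces that reflective call and the wrapping behaviour; it is not the actual RecoverLeaseFSUtils code, and its retry/back-off loop is omitted.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class IsFileClosedProbeSketch {
      /**
       * Reflectively call FileSystem#isFileClosed(Path), as the stack traces above suggest.
       * Returns false when the probe cannot be made; the interesting detail is that the
       * underlying IOException ("Filesystem closed") lives in getCause(), not in the
       * InvocationTargetException's own message.
       */
      static boolean isFileClosed(FileSystem fs, Path wal) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, wal);
        } catch (InvocationTargetException e) {
          // e.getMessage() is null here; the real reason is the wrapped cause.
          System.err.println("Failed invocation for " + wal + ": " + e.getCause());
          return false;
        } catch (ReflectiveOperationException e) {
          // isFileClosed not available or not accessible on this FileSystem implementation.
          return false;
        }
      }
    }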
2024-12-02T21:31:51,904 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C34593%2C1733175063928.1733175111904 2024-12-02T21:31:51,908 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:51,908 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:51,908 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:51,908 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:51,908 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:51,908 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/WALs/87c3fdb6c570,34593,1733175063928/87c3fdb6c570%2C34593%2C1733175063928.1733175111848 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/WALs/87c3fdb6c570,34593,1733175063928/87c3fdb6c570%2C34593%2C1733175063928.1733175111904 2024-12-02T21:31:51,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741877_1053 (size=731) 2024-12-02T21:31:51,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741877_1053 (size=731) 2024-12-02T21:31:51,914 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/WALs/87c3fdb6c570,34593,1733175063928/87c3fdb6c570%2C34593%2C1733175063928.1733175064554 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/oldWALs/87c3fdb6c570%2C34593%2C1733175063928.1733175064554 2024-12-02T21:31:51,914 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37697:37697),(127.0.0.1/127.0.0.1:45971:45971)] 2024-12-02T21:31:51,915 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/WALs/87c3fdb6c570,34593,1733175063928/87c3fdb6c570%2C34593%2C1733175063928.1733175111848 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/oldWALs/87c3fdb6c570%2C34593%2C1733175063928.1733175111848 2024-12-02T21:31:51,915 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T21:31:51,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T21:31:51,915 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
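The sequence just above is the log-rolling scenario itself: write, flush the region, roll the WAL, and archive the fully rolled file to oldWALs. The test appears to drive these steps through internal utilities on the Time-limited test thread; the sketch below is an assumed way to trigger a comparable flush-and-roll from a plain client using the public Admin API (the table name is taken from the log, everything else is illustrative).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndRollSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
          // Flush all memstores of the table; the log above shows the per-region version of
          // this (write to .tmp, then commit/rename into the store directory).
          admin.flush(table);
          // Roll the WAL on every live region server; the log shows the roll creating a new
          // 87c3fdb6c570%2C34593%2C... file and archiving the previous one to oldWALs.
          for (ServerName rs : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
            admin.rollWALWriter(rs);
          }
        }
      }
    }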
2024-12-02T21:31:51,915 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:31:51,915 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:51,915 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:51,915 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
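The call stack above shows where the connection close originates: AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that JUnit teardown pattern follows; the field name and the bare setUp/tearDown pair are assumptions, and the real test class does additional setup and cleanup around these calls.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class LogRollingTeardownSketch {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Brings up HDFS, ZooKeeper, the master and a region server for the test.
        testUtil.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Mirrors the call stack logged above: close the cluster connection, then stop
        // the master and region servers ("Shutting down minicluster").
        testUtil.shutdownMiniCluster();
      }
    }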
2024-12-02T21:31:51,916 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:31:51,916 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1507214946, stopped=false 2024-12-02T21:31:51,916 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87c3fdb6c570,37427,1733175063707 2024-12-02T21:31:51,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:31:51,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:31:51,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:51,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:51,965 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:31:51,966 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T21:31:51,966 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:31:51,966 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:51,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:31:51,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:31:51,966 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87c3fdb6c570,34593,1733175063928' ***** 2024-12-02T21:31:51,967 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T21:31:51,967 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:31:51,967 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T21:31:51,967 INFO [RS:0;87c3fdb6c570:34593 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:31:51,967 INFO [RS:0;87c3fdb6c570:34593 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
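Shutdown reaches the region server through ZooKeeper: the master deletes /hbase/running, each ZKWatcher receives the NodeDeleted event logged above, and then re-sets a watch on the now-missing znode ("Set watcher on znode that does not yet exist"). The sketch below shows the same watch-on-/hbase/running pattern with the plain ZooKeeper client; the quorum address comes from the log, while the handler body is only illustrative of reacting to NodeDeleted, not HBase's actual cluster-status tracking code.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58323", 30000, event -> { });
        Watcher watcher = new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                && "/hbase/running".equals(event.getPath())) {
              System.out.println("/hbase/running deleted: cluster shutdown requested");
            }
            try {
              // ZooKeeper watches are one-shot, so re-arm on the (now missing) znode,
              // just as the "Set watcher on znode that does not yet exist" lines show.
              zk.exists("/hbase/running", this);
            } catch (Exception e) {
              // session gone; stop re-arming in this sketch
            }
          }
        };
        zk.exists("/hbase/running", watcher);  // returns null if absent, but the watch is still set
        Thread.sleep(60_000);                  // keep the session alive long enough to observe events
      }
    }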
2024-12-02T21:31:51,968 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(3091): Received CLOSE for d725ced47985accfdccea3024939292e 2024-12-02T21:31:51,968 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(3091): Received CLOSE for cc8225a2b44932338372e937911a920c 2024-12-02T21:31:51,968 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(959): stopping server 87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:51,968 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:31:51,968 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d725ced47985accfdccea3024939292e, disabling compactions & flushes 2024-12-02T21:31:51,968 INFO [RS:0;87c3fdb6c570:34593 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87c3fdb6c570:34593. 2024-12-02T21:31:51,968 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 2024-12-02T21:31:51,968 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 2024-12-02T21:31:51,968 DEBUG [RS:0;87c3fdb6c570:34593 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:31:51,968 DEBUG [RS:0;87c3fdb6c570:34593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:51,968 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. after waiting 0 ms 2024-12-02T21:31:51,968 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 
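The region close journal above follows a fixed order: mark the region as closing and disable compactions and flushes, wait with a time limit for the close lock, then disable further updates before the stores are flushed and closed. The sketch below is a generic version of that close-lock pattern using a ReentrantReadWriteLock, not HRegion's actual locking code: writers hold the read lock per mutation, and close() takes the write lock with a timeout, mirroring the "Time limited wait for close lock" / "Acquired close lock ... after waiting 0 ms" lines.

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class CloseLockSketch {
      private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
      private volatile boolean closing = false;

      /** Write path: rejected once close has started, otherwise executed under the read lock. */
      public void put(Runnable mutation) throws IOException {
        if (closing) {
          throw new IOException("region is closing, updates disabled");
        }
        closeLock.readLock().lock();
        try {
          mutation.run();
        } finally {
          closeLock.readLock().unlock();
        }
      }

      /** Close path: stop new work, then wait (bounded) for in-flight writers to drain. */
      public void close(long timeoutMs) throws IOException, InterruptedException {
        closing = true;  // analogous to "disabling compactions & flushes"
        if (!closeLock.writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS)) {
          throw new IOException("could not acquire close lock within " + timeoutMs + " ms");
        }
        try {
          // "Updates disabled for region ...": flush remaining data and close stores here.
        } finally {
          closeLock.writeLock().unlock();
        }
      }
    }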
2024-12-02T21:31:51,968 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:31:51,968 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:31:51,968 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:31:51,968 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T21:31:51,969 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-02T21:31:51,969 DEBUG [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(1325): Online Regions={d725ced47985accfdccea3024939292e=TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e., 1588230740=hbase:meta,,1.1588230740, cc8225a2b44932338372e937911a920c=TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c.} 2024-12-02T21:31:51,969 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:31:51,969 DEBUG [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, cc8225a2b44932338372e937911a920c, d725ced47985accfdccea3024939292e 2024-12-02T21:31:51,969 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:31:51,969 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:31:51,969 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:31:51,969 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:31:51,969 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2->hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6971c7b12714adabba25079dbd8fdf8-top, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-3d976e91367d4e62bfeae47b85ccddf0, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/611b47174d624f8bb1d5792f143f70bd, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-4515bdbac98a45f3a533fcbb892d5f3d, 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/93b2234fd8154620bc6a1a7a27cb88fe, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/4a41070733ce449b8e3f98b9401ca346, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/962cd0bdacab406d91fc126944a06ad2, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/33dfcde2e9f04f88a73f1c59b060ac59, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/9f6f0358e0c143c387930ceb4c38280d, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a64f38687853482290f7b84bfdba6c02, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a8e444519a8c413cafbe54cff22f2f35, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/7a124b5df6754b41a7497d21d0bca255, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/9f83ccc83cff446eb1b0490cd637b90f, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/20c766714ded4a248dff2e0d4f3e7bdf, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/ebc3099e182146ae81707f3239808485, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/4689ca1957964eb783d91510cb5b6518, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/7c0641a4c59a480d8a5f2dfd274427fc, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/e62126cc0da94383bc87336e3af7ab72, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/94d1c1477f834378b1160501334c6991, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/8c1996cb870f4fc1997bf113d3f63bfa, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/b38a173c47664f70b331f5af226684a0] to archive 2024-12-02T21:31:51,971 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-12-02T21:31:51,974 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:51,975 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-3d976e91367d4e62bfeae47b85ccddf0 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-3d976e91367d4e62bfeae47b85ccddf0 2024-12-02T21:31:51,976 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-02T21:31:51,976 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:31:51,976 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:31:51,976 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733175111969Running coprocessor pre-close hooks at 1733175111969Disabling compacts and flushes for region at 1733175111969Disabling writes for close at 1733175111969Writing region close event to WAL at 1733175111972 (+3 ms)Running coprocessor post-close hooks at 1733175111976 (+4 ms)Closed at 1733175111976 2024-12-02T21:31:51,977 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/611b47174d624f8bb1d5792f143f70bd to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/611b47174d624f8bb1d5792f143f70bd 2024-12-02T21:31:51,977 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T21:31:51,977 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from 
FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-4515bdbac98a45f3a533fcbb892d5f3d to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/TestLogRolling-testLogRolling=4969b595c3fd7103c37e1a4903c80ee2-4515bdbac98a45f3a533fcbb892d5f3d 2024-12-02T21:31:51,978 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/93b2234fd8154620bc6a1a7a27cb88fe to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/93b2234fd8154620bc6a1a7a27cb88fe 2024-12-02T21:31:51,979 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/4a41070733ce449b8e3f98b9401ca346 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/4a41070733ce449b8e3f98b9401ca346 2024-12-02T21:31:51,980 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/962cd0bdacab406d91fc126944a06ad2 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/962cd0bdacab406d91fc126944a06ad2 2024-12-02T21:31:51,981 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/33dfcde2e9f04f88a73f1c59b060ac59 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/33dfcde2e9f04f88a73f1c59b060ac59 2024-12-02T21:31:51,982 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/9f6f0358e0c143c387930ceb4c38280d to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/9f6f0358e0c143c387930ceb4c38280d 
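Each "Archived from FileableStoreFile ... to ..." entry above moves a compacted store file from the region's data directory to the same relative location under archive/data. A minimal sketch of that path rewrite in plain java.nio.file, assuming only the layout visible in these paths; it is illustrative, not HFileArchiver's actual code.

    import java.nio.file.Path;
    import java.nio.file.Paths;

    // Illustrative only: mirror a store file path from the data directory into the
    // archive directory, the way the "Archived from FileableStoreFile ... to ..."
    // lines above do.
    public class ArchivePathSketch {
        static Path toArchive(Path rootDir, Path storeFile) {
            Path dataRoot = rootDir.resolve("data");
            Path relative = dataRoot.relativize(storeFile);   // default/<table>/<region>/info/<file>
            return rootDir.resolve("archive").resolve("data").resolve(relative);
        }

        public static void main(String[] args) {
            Path root = Paths.get("/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a");
            Path hfile = root.resolve(
                "data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/93b2234fd8154620bc6a1a7a27cb88fe");
            System.out.println(toArchive(root, hfile));
            // -> .../archive/data/default/TestLogRolling-testLogRolling/d725.../info/93b2...
        }
    }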
2024-12-02T21:31:51,983 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a64f38687853482290f7b84bfdba6c02 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a64f38687853482290f7b84bfdba6c02 2024-12-02T21:31:51,983 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a8e444519a8c413cafbe54cff22f2f35 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/a8e444519a8c413cafbe54cff22f2f35 2024-12-02T21:31:51,984 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/7a124b5df6754b41a7497d21d0bca255 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/7a124b5df6754b41a7497d21d0bca255 2024-12-02T21:31:51,985 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/9f83ccc83cff446eb1b0490cd637b90f to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/9f83ccc83cff446eb1b0490cd637b90f 2024-12-02T21:31:51,986 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/20c766714ded4a248dff2e0d4f3e7bdf to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/20c766714ded4a248dff2e0d4f3e7bdf 2024-12-02T21:31:51,987 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/ebc3099e182146ae81707f3239808485 to 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/ebc3099e182146ae81707f3239808485 2024-12-02T21:31:51,987 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/4689ca1957964eb783d91510cb5b6518 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/4689ca1957964eb783d91510cb5b6518 2024-12-02T21:31:51,990 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/7c0641a4c59a480d8a5f2dfd274427fc to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/7c0641a4c59a480d8a5f2dfd274427fc 2024-12-02T21:31:51,991 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/e62126cc0da94383bc87336e3af7ab72 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/e62126cc0da94383bc87336e3af7ab72 2024-12-02T21:31:51,992 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/94d1c1477f834378b1160501334c6991 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/94d1c1477f834378b1160501334c6991 2024-12-02T21:31:51,993 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/8c1996cb870f4fc1997bf113d3f63bfa to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/8c1996cb870f4fc1997bf113d3f63bfa 2024-12-02T21:31:51,994 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/b38a173c47664f70b331f5af226684a0 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/info/b38a173c47664f70b331f5af226684a0 2024-12-02T21:31:51,995 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=87c3fdb6c570:37427 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-02T21:31:51,995 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [611b47174d624f8bb1d5792f143f70bd=42984, 93b2234fd8154620bc6a1a7a27cb88fe=12516, 4a41070733ce449b8e3f98b9401ca346=64714, 962cd0bdacab406d91fc126944a06ad2=19000, 33dfcde2e9f04f88a73f1c59b060ac59=19000, 9f6f0358e0c143c387930ceb4c38280d=97233, a64f38687853482290f7b84bfdba6c02=23316, a8e444519a8c413cafbe54cff22f2f35=19000, 7a124b5df6754b41a7497d21d0bca255=118899, 9f83ccc83cff446eb1b0490cd637b90f=12516, 20c766714ded4a248dff2e0d4f3e7bdf=27628, ebc3099e182146ae81707f3239808485=149390, 4689ca1957964eb783d91510cb5b6518=12516, 7c0641a4c59a480d8a5f2dfd274427fc=17918, e62126cc0da94383bc87336e3af7ab72=175396, 94d1c1477f834378b1160501334c6991=17918, 8c1996cb870f4fc1997bf113d3f63bfa=12523, b38a173c47664f70b331f5af226684a0=17918] 2024-12-02T21:31:51,998 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/d725ced47985accfdccea3024939292e/recovered.edits/342.seqid, newMaxSeqId=342, maxSeqId=130 2024-12-02T21:31:51,998 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 2024-12-02T21:31:51,998 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d725ced47985accfdccea3024939292e: Waiting for close lock at 1733175111968Running coprocessor pre-close hooks at 1733175111968Disabling compacts and flushes for region at 1733175111968Disabling writes for close at 1733175111968Writing region close event to WAL at 1733175111995 (+27 ms)Running coprocessor post-close hooks at 1733175111998 (+3 ms)Closed at 1733175111998 2024-12-02T21:31:51,999 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733175079356.d725ced47985accfdccea3024939292e. 
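The region close journal above records a wall-clock timestamp per step and annotates the deltas, e.g. "(+27 ms)" for writing the close event to the WAL while the store closer is still archiving files; the StoppedRpcClientException appears only because the file-archival report to the master runs after the region server has already stopped its RPC client, and the log itself notes the report will be retried. A small sketch that recomputes the deltas from the journal's raw epoch-millis (timestamps copied from the d725ced47985accfdccea3024939292e journal above, with its own (+N ms) annotations stripped so they can be re-derived):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Recompute the "(+N ms)" annotations of a region close journal from its
    // raw "at <epoch-millis>" step timestamps.
    public class CloseJournalDeltas {
        public static void main(String[] args) {
            String journal = "Waiting for close lock at 1733175111968"
                + "Running coprocessor pre-close hooks at 1733175111968"
                + "Disabling compacts and flushes for region at 1733175111968"
                + "Disabling writes for close at 1733175111968"
                + "Writing region close event to WAL at 1733175111995"
                + "Running coprocessor post-close hooks at 1733175111998"
                + "Closed at 1733175111998";
            Matcher m = Pattern.compile("at (\\d{13})").matcher(journal);
            List<Long> steps = new ArrayList<>();
            while (m.find()) {
                steps.add(Long.parseLong(m.group(1)));
            }
            for (int i = 1; i < steps.size(); i++) {
                System.out.println("step " + i + ": +" + (steps.get(i) - steps.get(i - 1)) + " ms");
            }
            // Expect +0, +0, +0, +27, +3, +0 ms, matching the (+27 ms)/(+3 ms) annotations above.
        }
    }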
2024-12-02T21:31:51,999 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cc8225a2b44932338372e937911a920c, disabling compactions & flushes 2024-12-02T21:31:51,999 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c. 2024-12-02T21:31:51,999 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c. 2024-12-02T21:31:51,999 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c. after waiting 0 ms 2024-12-02T21:31:51,999 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c. 2024-12-02T21:31:51,999 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2->hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/4969b595c3fd7103c37e1a4903c80ee2/info/a6971c7b12714adabba25079dbd8fdf8-bottom] to archive 2024-12-02T21:31:52,000 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T21:31:52,001 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2 to hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/archive/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/info/a6971c7b12714adabba25079dbd8fdf8.4969b595c3fd7103c37e1a4903c80ee2 2024-12-02T21:31:52,001 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-02T21:31:52,005 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/data/default/TestLogRolling-testLogRolling/cc8225a2b44932338372e937911a920c/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-12-02T21:31:52,005 INFO [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c. 
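Both user regions finish their close by writing a <seqid>.seqid marker file under recovered.edits (342.seqid and 135.seqid above, matching newMaxSeqId=342 and 135), recording the highest sequence id at close time. A hypothetical, filename-only sketch of recovering that number from a directory listing, not HBase's WALSplitUtil logic:

    import java.util.Arrays;
    import java.util.OptionalLong;

    // Hypothetical: given file names from a region's recovered.edits directory,
    // recover the highest ".seqid" marker, e.g. "342.seqid" -> 342.
    public class MaxSeqIdFromMarkers {
        static OptionalLong maxSeqId(String... fileNames) {
            return Arrays.stream(fileNames)
                .filter(n -> n.endsWith(".seqid"))
                .mapToLong(n -> Long.parseLong(n.substring(0, n.length() - ".seqid".length())))
                .max();
        }

        public static void main(String[] args) {
            System.out.println(maxSeqId("342.seqid"));            // OptionalLong[342]
            System.out.println(maxSeqId("135.seqid", "0.temp"));  // OptionalLong[135]
        }
    }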
2024-12-02T21:31:52,005 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cc8225a2b44932338372e937911a920c: Waiting for close lock at 1733175111999Running coprocessor pre-close hooks at 1733175111999Disabling compacts and flushes for region at 1733175111999Disabling writes for close at 1733175111999Writing region close event to WAL at 1733175112001 (+2 ms)Running coprocessor post-close hooks at 1733175112005 (+4 ms)Closed at 1733175112005 2024-12-02T21:31:52,005 DEBUG [RS_CLOSE_REGION-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733175079356.cc8225a2b44932338372e937911a920c. 2024-12-02T21:31:52,169 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(976): stopping server 87c3fdb6c570,34593,1733175063928; all regions closed. 2024-12-02T21:31:52,170 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,171 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,171 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,171 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,171 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741834_1010 (size=8107) 2024-12-02T21:31:52,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741834_1010 (size=8107) 2024-12-02T21:31:52,181 DEBUG [RS:0;87c3fdb6c570:34593 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/oldWALs 2024-12-02T21:31:52,181 INFO [RS:0;87c3fdb6c570:34593 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C34593%2C1733175063928.meta:.meta(num 1733175064890) 2024-12-02T21:31:52,182 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,182 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,182 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,182 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,182 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741880_1056 (size=780) 2024-12-02T21:31:52,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741880_1056 (size=780) 2024-12-02T21:31:52,188 DEBUG [RS:0;87c3fdb6c570:34593 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/oldWALs 2024-12-02T21:31:52,188 INFO [RS:0;87c3fdb6c570:34593 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C34593%2C1733175063928:(num 1733175111904) 2024-12-02T21:31:52,188 DEBUG [RS:0;87c3fdb6c570:34593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:52,188 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:31:52,188 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:31:52,188 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.ChoreService(370): Chore service for: 
regionserver/87c3fdb6c570:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T21:31:52,188 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:31:52,188 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T21:31:52,189 INFO [RS:0;87c3fdb6c570:34593 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34593 2024-12-02T21:31:52,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87c3fdb6c570,34593,1733175063928 2024-12-02T21:31:52,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:31:52,197 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:31:52,207 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87c3fdb6c570,34593,1733175063928] 2024-12-02T21:31:52,217 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87c3fdb6c570,34593,1733175063928 already deleted, retry=false 2024-12-02T21:31:52,218 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87c3fdb6c570,34593,1733175063928 expired; onlineServers=0 2024-12-02T21:31:52,218 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87c3fdb6c570,37427,1733175063707' ***** 2024-12-02T21:31:52,218 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T21:31:52,218 INFO [M:0;87c3fdb6c570:37427 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:31:52,218 INFO [M:0;87c3fdb6c570:37427 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:31:52,218 DEBUG [M:0;87c3fdb6c570:37427 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T21:31:52,218 DEBUG [M:0;87c3fdb6c570:37427 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T21:31:52,218 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
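The NodeDeleted event on /hbase/rs/87c3fdb6c570,34593,1733175063928 is what lets the master's RegionServerTracker process the server's expiration and, with the cluster-shutdown flag set and onlineServers=0, stop the master itself. A minimal, hypothetical watcher using the plain ZooKeeper client API (quorum address taken from the log above; this sketches the mechanism only, not RegionServerTracker):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Hypothetical sketch: watch an ephemeral /hbase/rs/<server> znode and react
    // when it disappears, roughly what the "RegionServer ephemeral node deleted,
    // processing expiration" line above reflects on the master side.
    public class RsNodeWatch {
        public static void main(String[] args) throws Exception {
            String quorum = "127.0.0.1:58323";                     // quorum from the log; adjust as needed
            String rsNode = "/hbase/rs/87c3fdb6c570,34593,1733175063928";
            CountDownLatch deleted = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper(quorum, 30_000, (WatchedEvent event) -> {
                if (event.getType() == Watcher.Event.EventType.NodeDeleted
                        && rsNode.equals(event.getPath())) {
                    deleted.countDown();                           // the RS ephemeral node is gone
                }
            });
            zk.exists(rsNode, true);                               // register a one-shot watch via the default watcher
            deleted.await();                                       // fires once the RS session ends and the znode is removed
            System.out.println("RegionServer znode deleted: " + rsNode);
            zk.close();
        }
    }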
2024-12-02T21:31:52,218 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733175064264 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733175064264,5,FailOnTimeoutGroup] 2024-12-02T21:31:52,218 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733175064263 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733175064263,5,FailOnTimeoutGroup] 2024-12-02T21:31:52,218 INFO [M:0;87c3fdb6c570:37427 {}] hbase.ChoreService(370): Chore service for: master/87c3fdb6c570:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T21:31:52,219 INFO [M:0;87c3fdb6c570:37427 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:31:52,219 DEBUG [M:0;87c3fdb6c570:37427 {}] master.HMaster(1795): Stopping service threads 2024-12-02T21:31:52,219 INFO [M:0;87c3fdb6c570:37427 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T21:31:52,219 INFO [M:0;87c3fdb6c570:37427 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:31:52,219 INFO [M:0;87c3fdb6c570:37427 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T21:31:52,219 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T21:31:52,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T21:31:52,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:52,228 DEBUG [M:0;87c3fdb6c570:37427 {}] zookeeper.ZKUtil(347): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T21:31:52,228 WARN [M:0;87c3fdb6c570:37427 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T21:31:52,229 INFO [M:0;87c3fdb6c570:37427 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/.lastflushedseqids 2024-12-02T21:31:52,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741881_1057 (size=228) 2024-12-02T21:31:52,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741881_1057 (size=228) 2024-12-02T21:31:52,239 INFO [M:0;87c3fdb6c570:37427 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T21:31:52,240 INFO [M:0;87c3fdb6c570:37427 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T21:31:52,240 DEBUG [M:0;87c3fdb6c570:37427 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:31:52,240 INFO [M:0;87c3fdb6c570:37427 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:52,240 DEBUG [M:0;87c3fdb6c570:37427 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:52,240 DEBUG [M:0;87c3fdb6c570:37427 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:31:52,240 DEBUG [M:0;87c3fdb6c570:37427 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:52,240 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.44 KB heapSize=63.39 KB 2024-12-02T21:31:52,256 DEBUG [M:0;87c3fdb6c570:37427 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c809afc204b34163a007eca1946c97e9 is 82, key is hbase:meta,,1/info:regioninfo/1733175064921/Put/seqid=0 2024-12-02T21:31:52,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741882_1058 (size=5672) 2024-12-02T21:31:52,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741882_1058 (size=5672) 2024-12-02T21:31:52,260 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c809afc204b34163a007eca1946c97e9 2024-12-02T21:31:52,279 DEBUG [M:0;87c3fdb6c570:37427 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7e11c55cb7f94550bbafa2690d1fc539 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733175065456/Put/seqid=0 2024-12-02T21:31:52,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741883_1059 (size=7091) 2024-12-02T21:31:52,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741883_1059 (size=7091) 2024-12-02T21:31:52,284 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.84 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7e11c55cb7f94550bbafa2690d1fc539 2024-12-02T21:31:52,287 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7e11c55cb7f94550bbafa2690d1fc539 2024-12-02T21:31:52,299 DEBUG [M:0;87c3fdb6c570:37427 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ecf23e4d4c244bf878b0482ace9a0aa is 69, key is 87c3fdb6c570,34593,1733175063928/rs:state/1733175064397/Put/seqid=0 
2024-12-02T21:31:52,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741884_1060 (size=5156) 2024-12-02T21:31:52,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741884_1060 (size=5156) 2024-12-02T21:31:52,304 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ecf23e4d4c244bf878b0482ace9a0aa 2024-12-02T21:31:52,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:52,307 INFO [RS:0;87c3fdb6c570:34593 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:31:52,307 INFO [RS:0;87c3fdb6c570:34593 {}] regionserver.HRegionServer(1031): Exiting; stopping=87c3fdb6c570,34593,1733175063928; zookeeper connection closed. 2024-12-02T21:31:52,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x10197f4b0350001, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:52,307 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4be4f013 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4be4f013 2024-12-02T21:31:52,308 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T21:31:52,319 DEBUG [M:0;87c3fdb6c570:37427 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/615ef7861a7441e199486622f64f4803 is 52, key is load_balancer_on/state:d/1733175065063/Put/seqid=0 2024-12-02T21:31:52,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741885_1061 (size=5056) 2024-12-02T21:31:52,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741885_1061 (size=5056) 2024-12-02T21:31:52,324 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/615ef7861a7441e199486622f64f4803 2024-12-02T21:31:52,328 DEBUG [M:0;87c3fdb6c570:37427 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c809afc204b34163a007eca1946c97e9 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c809afc204b34163a007eca1946c97e9 2024-12-02T21:31:52,331 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c809afc204b34163a007eca1946c97e9, entries=8, sequenceid=125, filesize=5.5 K 2024-12-02T21:31:52,332 DEBUG [M:0;87c3fdb6c570:37427 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7e11c55cb7f94550bbafa2690d1fc539 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7e11c55cb7f94550bbafa2690d1fc539 2024-12-02T21:31:52,336 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7e11c55cb7f94550bbafa2690d1fc539 2024-12-02T21:31:52,336 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7e11c55cb7f94550bbafa2690d1fc539, entries=13, sequenceid=125, filesize=6.9 K 2024-12-02T21:31:52,337 DEBUG [M:0;87c3fdb6c570:37427 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ecf23e4d4c244bf878b0482ace9a0aa as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0ecf23e4d4c244bf878b0482ace9a0aa 2024-12-02T21:31:52,342 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0ecf23e4d4c244bf878b0482ace9a0aa, entries=1, sequenceid=125, filesize=5.0 K 2024-12-02T21:31:52,343 DEBUG [M:0;87c3fdb6c570:37427 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/615ef7861a7441e199486622f64f4803 as hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/615ef7861a7441e199486622f64f4803 2024-12-02T21:31:52,348 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45713/user/jenkins/test-data/95e61529-e254-f2fe-05a3-05213238059a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/615ef7861a7441e199486622f64f4803, entries=1, sequenceid=125, filesize=4.9 K 2024-12-02T21:31:52,349 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=125, compaction requested=false 2024-12-02T21:31:52,350 INFO [M:0;87c3fdb6c570:37427 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
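The four flush outputs committed above line up with the block sizes reported when they were written: 5672 B (info), 7091 B (proc), 5156 B (rs) and 5056 B (state), which the store prints as filesize=5.5 K, 6.9 K, 5.0 K and 4.9 K. A trivial check of that rounding, with the byte counts copied from the addStoredBlock lines:

    // Quick sanity check of the file sizes reported above: the addStoredBlock
    // lines give bytes, the "Added ... filesize=" lines give the same values in K.
    public class FlushSizeCheck {
        public static void main(String[] args) {
            long[] bytes = {5672, 7091, 5156, 5056};   // info, proc, rs, state (from the block reports)
            String[] family = {"info", "proc", "rs", "state"};
            for (int i = 0; i < bytes.length; i++) {
                System.out.printf("%-5s %d B = %.1f K%n", family[i], bytes[i], bytes[i] / 1024.0);
            }
            // Prints 5.5, 6.9, 5.0, 4.9 K, matching filesize=5.5 K / 6.9 K / 5.0 K / 4.9 K above.
        }
    }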
2024-12-02T21:31:52,350 DEBUG [M:0;87c3fdb6c570:37427 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733175112240Disabling compacts and flushes for region at 1733175112240Disabling writes for close at 1733175112240Obtaining lock to block concurrent updates at 1733175112240Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733175112240Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52675, getHeapSize=64848, getOffHeapSize=0, getCellsCount=148 at 1733175112240Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733175112241 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733175112241Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733175112256 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733175112256Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733175112264 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733175112278 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733175112278Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733175112287 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733175112299 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733175112299Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733175112307 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733175112319 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733175112319Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58996d48: reopening flushed file at 1733175112327 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e64f2ef: reopening flushed file at 1733175112331 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b737e43: reopening flushed file at 1733175112336 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d1a4795: reopening flushed file at 1733175112342 (+6 ms)Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=125, compaction requested=false at 1733175112349 (+7 ms)Writing region close event to WAL at 1733175112350 (+1 ms)Closed at 1733175112350 2024-12-02T21:31:52,350 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,350 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,350 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,350 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,350 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:52,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741830_1006 (size=61344) 2024-12-02T21:31:52,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34571 is added to blk_1073741830_1006 (size=61344) 2024-12-02T21:31:52,353 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
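The close journal above also carries the flush accounting for the master's local store: dataSize=52675 B and heapSize=64848 B over getCellsCount=148 cells, flushed in 108 ms. A back-of-the-envelope check of those figures; the per-cell averages are derived arithmetic, not values HBase reports:

    // Rough per-cell accounting for the master-store flush journalled above.
    public class FlushAccounting {
        public static void main(String[] args) {
            long dataSize = 52_675, heapSize = 64_848, cells = 148;        // values from the journal above
            System.out.printf("dataSize  = %.2f KB%n", dataSize / 1024.0); // ~51.44 KB
            System.out.printf("heapSize  = %.2f KB%n", heapSize / 1024.0); // ~63.33 KB
            System.out.printf("avg data/cell = %d B, heap overhead/cell = %d B%n",
                dataSize / cells, (heapSize - dataSize) / cells);          // ~355 B and ~82 B per cell
        }
    }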
2024-12-02T21:31:52,353 INFO [M:0;87c3fdb6c570:37427 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T21:31:52,353 INFO [M:0;87c3fdb6c570:37427 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37427 2024-12-02T21:31:52,353 INFO [M:0;87c3fdb6c570:37427 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:31:52,424 INFO [regionserver/87c3fdb6c570:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:31:52,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:52,465 INFO [M:0;87c3fdb6c570:37427 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:31:52,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37427-0x10197f4b0350000, quorum=127.0.0.1:58323, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:52,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5eab25ce{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:31:52,470 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5773e0ea{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:31:52,470 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:31:52,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70aed17c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:31:52,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@208716aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/hadoop.log.dir/,STOPPED} 2024-12-02T21:31:52,473 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:31:52,473 WARN [BP-193301389-172.17.0.3-1733175061531 heartbeating to localhost/127.0.0.1:45713 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:31:52,473 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:31:52,473 WARN [BP-193301389-172.17.0.3-1733175061531 heartbeating to localhost/127.0.0.1:45713 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-193301389-172.17.0.3-1733175061531 (Datanode Uuid f044438b-b183-4f4c-92e3-760fac304e1d) service to localhost/127.0.0.1:45713 2024-12-02T21:31:52,474 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5/data/data3/current/BP-193301389-172.17.0.3-1733175061531 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:52,474 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5/data/data4/current/BP-193301389-172.17.0.3-1733175061531 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:52,474 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:31:52,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9530f47{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:31:52,477 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59fd7f75{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:31:52,477 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:31:52,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1724ca70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:31:52,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c08109c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/hadoop.log.dir/,STOPPED} 2024-12-02T21:31:52,478 WARN [BP-193301389-172.17.0.3-1733175061531 heartbeating to localhost/127.0.0.1:45713 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:31:52,478 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:31:52,478 WARN [BP-193301389-172.17.0.3-1733175061531 heartbeating to localhost/127.0.0.1:45713 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-193301389-172.17.0.3-1733175061531 (Datanode Uuid 413c5099-86fd-43ba-b884-ff6f625b9fb5) service to localhost/127.0.0.1:45713 2024-12-02T21:31:52,478 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:31:52,478 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5/data/data1/current/BP-193301389-172.17.0.3-1733175061531 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:52,479 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/cluster_09d803ff-c21c-c6cf-cc7b-7a8cc7b5f7e5/data/data2/current/BP-193301389-172.17.0.3-1733175061531 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:52,479 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:31:52,483 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cf57c4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:31:52,483 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a67ff9c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:31:52,483 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:31:52,484 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f8818bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:31:52,484 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24350cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/hadoop.log.dir/,STOPPED} 2024-12-02T21:31:52,489 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:31:52,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T21:31:52,525 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 208) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45713 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45713 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45713 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45713 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45713 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:45713 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45713 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45713 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=48 (was 55), ProcessCount=11 (was 11), AvailableMemoryMB=6589 (was 6622) 2024-12-02T21:31:52,531 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=48, ProcessCount=11, AvailableMemoryMB=6589 2024-12-02T21:31:52,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:31:52,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/hadoop.log.dir so I do NOT create it in target/test-data/52d52759-fe82-7199-de29-dfb591555f6b 2024-12-02T21:31:52,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0897e75f-ee97-709a-4887-62fc8a5d7ce6/hadoop.tmp.dir so I do NOT create it in target/test-data/52d52759-fe82-7199-de29-dfb591555f6b 2024-12-02T21:31:52,532 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a, deleteOnExit=true 2024-12-02T21:31:52,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T21:31:52,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/test.cache.data in system properties and HBase conf 2024-12-02T21:31:52,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:31:52,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:31:52,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:31:52,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:31:52,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T21:31:52,532 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:31:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:31:52,546 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:31:52,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:52,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:53,189 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:31:53,191 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:31:53,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:31:53,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:31:53,193 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:31:53,193 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:31:53,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31d39f87{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:31:53,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64e1b9c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:31:53,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c66b7d1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/java.io.tmpdir/jetty-localhost-39257-hadoop-hdfs-3_4_1-tests_jar-_-any-3537938515048137974/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:31:53,281 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2c0b3275{HTTP/1.1, (http/1.1)}{localhost:39257} 2024-12-02T21:31:53,281 INFO [Time-limited test {}] server.Server(415): Started @303273ms 2024-12-02T21:31:53,290 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:31:53,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:53,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:53,610 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:31:53,612 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:31:53,612 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:31:53,612 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:31:53,613 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:31:53,613 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cb9612e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:31:53,613 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2601a9a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:31:53,704 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@56fc288a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/java.io.tmpdir/jetty-localhost-46537-hadoop-hdfs-3_4_1-tests_jar-_-any-11090073408654241495/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:31:53,704 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@541a748b{HTTP/1.1, (http/1.1)}{localhost:46537} 2024-12-02T21:31:53,704 INFO [Time-limited test {}] server.Server(415): Started @303696ms 2024-12-02T21:31:53,705 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:31:53,729 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:31:53,731 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:31:53,731 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:31:53,731 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:31:53,731 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:31:53,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55931fd2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:31:53,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f4abee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:31:53,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e0b3b7c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/java.io.tmpdir/jetty-localhost-41815-hadoop-hdfs-3_4_1-tests_jar-_-any-1199865873482717806/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:31:53,821 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@120711d3{HTTP/1.1, (http/1.1)}{localhost:41815} 2024-12-02T21:31:53,821 INFO [Time-limited test {}] server.Server(415): Started @303813ms 2024-12-02T21:31:53,822 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:31:54,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:54,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:31:54,889 WARN [Thread-2510 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a/data/data1/current/BP-1837138859-172.17.0.3-1733175112549/current, will proceed with Du for space computation calculation, 2024-12-02T21:31:54,889 WARN [Thread-2511 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a/data/data2/current/BP-1837138859-172.17.0.3-1733175112549/current, will proceed with Du for space computation calculation, 2024-12-02T21:31:54,908 WARN [Thread-2473 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:31:54,914 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a1c846a072f0af0 with lease ID 0xf72a56c645cd7167: Processing first storage report for DS-cbbfa462-e0d0-4a58-84ed-14273936cb59 from datanode DatanodeRegistration(127.0.0.1:45351, datanodeUuid=d1003b37-d2fe-49a7-95a5-f21e53b246b3, infoPort=41839, infoSecurePort=0, ipcPort=33677, storageInfo=lv=-57;cid=testClusterID;nsid=1479039784;c=1733175112549) 2024-12-02T21:31:54,914 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a1c846a072f0af0 with lease ID 0xf72a56c645cd7167: from storage DS-cbbfa462-e0d0-4a58-84ed-14273936cb59 node DatanodeRegistration(127.0.0.1:45351, datanodeUuid=d1003b37-d2fe-49a7-95a5-f21e53b246b3, infoPort=41839, infoSecurePort=0, ipcPort=33677, storageInfo=lv=-57;cid=testClusterID;nsid=1479039784;c=1733175112549), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:31:54,914 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a1c846a072f0af0 with lease ID 0xf72a56c645cd7167: Processing first storage report for DS-4cede157-0717-47a6-95ed-bb1d987afb1e from datanode DatanodeRegistration(127.0.0.1:45351, datanodeUuid=d1003b37-d2fe-49a7-95a5-f21e53b246b3, infoPort=41839, infoSecurePort=0, ipcPort=33677, storageInfo=lv=-57;cid=testClusterID;nsid=1479039784;c=1733175112549) 2024-12-02T21:31:54,914 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a1c846a072f0af0 with lease ID 0xf72a56c645cd7167: from storage DS-4cede157-0717-47a6-95ed-bb1d987afb1e node DatanodeRegistration(127.0.0.1:45351, datanodeUuid=d1003b37-d2fe-49a7-95a5-f21e53b246b3, infoPort=41839, infoSecurePort=0, ipcPort=33677, storageInfo=lv=-57;cid=testClusterID;nsid=1479039784;c=1733175112549), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:31:55,036 WARN [Thread-2521 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a/data/data3/current/BP-1837138859-172.17.0.3-1733175112549/current, will proceed with Du for space computation calculation, 2024-12-02T21:31:55,036 WARN [Thread-2522 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a/data/data4/current/BP-1837138859-172.17.0.3-1733175112549/current, will proceed with Du for space computation calculation, 2024-12-02T21:31:55,056 WARN [Thread-2496 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:31:55,058 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x415000b1c1a50e51 with lease ID 0xf72a56c645cd7168: Processing first storage report for DS-817094f8-1f59-42b2-9dc4-26f5f2a6417d from datanode DatanodeRegistration(127.0.0.1:35999, datanodeUuid=10c79739-4a7b-442f-bccc-e2c234a2e829, infoPort=34285, infoSecurePort=0, ipcPort=43173, storageInfo=lv=-57;cid=testClusterID;nsid=1479039784;c=1733175112549) 2024-12-02T21:31:55,058 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x415000b1c1a50e51 with lease ID 0xf72a56c645cd7168: from storage DS-817094f8-1f59-42b2-9dc4-26f5f2a6417d node DatanodeRegistration(127.0.0.1:35999, datanodeUuid=10c79739-4a7b-442f-bccc-e2c234a2e829, infoPort=34285, infoSecurePort=0, ipcPort=43173, storageInfo=lv=-57;cid=testClusterID;nsid=1479039784;c=1733175112549), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:31:55,058 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x415000b1c1a50e51 with lease ID 0xf72a56c645cd7168: Processing first storage report for DS-ad879148-ecc5-45ff-95bc-7ac7debef1e8 from datanode DatanodeRegistration(127.0.0.1:35999, datanodeUuid=10c79739-4a7b-442f-bccc-e2c234a2e829, infoPort=34285, infoSecurePort=0, ipcPort=43173, storageInfo=lv=-57;cid=testClusterID;nsid=1479039784;c=1733175112549) 2024-12-02T21:31:55,058 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x415000b1c1a50e51 with lease ID 0xf72a56c645cd7168: from storage DS-ad879148-ecc5-45ff-95bc-7ac7debef1e8 node DatanodeRegistration(127.0.0.1:35999, datanodeUuid=10c79739-4a7b-442f-bccc-e2c234a2e829, infoPort=34285, infoSecurePort=0, ipcPort=43173, storageInfo=lv=-57;cid=testClusterID;nsid=1479039784;c=1733175112549), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:31:55,066 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b 2024-12-02T21:31:55,092 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a/zookeeper_0, clientPort=52579, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:31:55,094 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52579 2024-12-02T21:31:55,094 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:55,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:55,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:31:55,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:31:55,107 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0 with version=8 2024-12-02T21:31:55,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43567/user/jenkins/test-data/ab702c39-cee5-eb36-2518-19bad62931c2/hbase-staging 2024-12-02T21:31:55,108 INFO [Time-limited test {}] client.ConnectionUtils(128): master/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:31:55,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:55,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:55,108 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:31:55,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:55,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:31:55,109 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T21:31:55,109 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:31:55,109 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44197 2024-12-02T21:31:55,110 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44197 connecting to ZooKeeper ensemble=127.0.0.1:52579 2024-12-02T21:31:55,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:441970x0, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-02T21:31:55,181 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44197-0x10197f578ff0000 connected 2024-12-02T21:31:55,260 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:55,262 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:55,264 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:31:55,264 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0, hbase.cluster.distributed=false 2024-12-02T21:31:55,267 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:31:55,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44197 2024-12-02T21:31:55,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44197 2024-12-02T21:31:55,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44197 2024-12-02T21:31:55,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44197 2024-12-02T21:31:55,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44197 2024-12-02T21:31:55,285 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/87c3fdb6c570:0 server-side Connection retries=45 2024-12-02T21:31:55,285 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:55,285 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:55,285 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:31:55,285 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:31:55,285 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:31:55,285 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:31:55,285 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:31:55,286 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37239 2024-12-02T21:31:55,287 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37239 connecting to ZooKeeper ensemble=127.0.0.1:52579 2024-12-02T21:31:55,287 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:55,288 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:55,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:372390x0, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:31:55,302 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37239-0x10197f578ff0001 connected 2024-12-02T21:31:55,302 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:31:55,302 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:31:55,303 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:31:55,303 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:31:55,304 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:31:55,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37239 2024-12-02T21:31:55,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37239 2024-12-02T21:31:55,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37239 2024-12-02T21:31:55,306 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37239 2024-12-02T21:31:55,306 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37239 2024-12-02T21:31:55,320 DEBUG [M:0;87c3fdb6c570:44197 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;87c3fdb6c570:44197 2024-12-02T21:31:55,320 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/87c3fdb6c570,44197,1733175115108 2024-12-02T21:31:55,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:31:55,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:31:55,334 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/87c3fdb6c570,44197,1733175115108 2024-12-02T21:31:55,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:55,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:31:55,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:55,344 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:31:55,345 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/87c3fdb6c570,44197,1733175115108 from backup master directory 2024-12-02T21:31:55,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/87c3fdb6c570,44197,1733175115108 2024-12-02T21:31:55,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:31:55,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:31:55,354 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T21:31:55,354 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=87c3fdb6c570,44197,1733175115108 2024-12-02T21:31:55,361 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/hbase.id] with ID: 0d7ab248-1d6a-40c4-9148-02afc0479b69 2024-12-02T21:31:55,362 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/.tmp/hbase.id 2024-12-02T21:31:55,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:31:55,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:31:55,370 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/.tmp/hbase.id]:[hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/hbase.id] 2024-12-02T21:31:55,380 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:55,380 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T21:31:55,381 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-02T21:31:55,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:55,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:55,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:31:55,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:31:55,400 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:31:55,401 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T21:31:55,401 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:31:55,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:31:55,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:31:55,408 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store 2024-12-02T21:31:55,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:31:55,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:31:55,414 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:55,414 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:31:55,414 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:55,414 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:55,414 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:31:55,414 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:55,414 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:31:55,414 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733175115414Disabling compacts and flushes for region at 1733175115414Disabling writes for close at 1733175115414Writing region close event to WAL at 1733175115414Closed at 1733175115414 2024-12-02T21:31:55,415 WARN [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/.initializing 2024-12-02T21:31:55,415 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/WALs/87c3fdb6c570,44197,1733175115108 2024-12-02T21:31:55,417 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C44197%2C1733175115108, suffix=, logDir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/WALs/87c3fdb6c570,44197,1733175115108, archiveDir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/oldWALs, maxLogs=10 2024-12-02T21:31:55,418 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C44197%2C1733175115108.1733175115417 2024-12-02T21:31:55,421 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/WALs/87c3fdb6c570,44197,1733175115108/87c3fdb6c570%2C44197%2C1733175115108.1733175115417 2024-12-02T21:31:55,422 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34285:34285),(127.0.0.1/127.0.0.1:41839:41839)] 2024-12-02T21:31:55,423 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:31:55,423 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:55,423 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,423 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,424 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,425 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:31:55,425 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:55,426 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:55,426 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,426 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:31:55,427 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:55,427 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:31:55,427 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,428 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:31:55,428 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:55,428 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:31:55,428 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,429 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:31:55,429 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:55,429 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:31:55,430 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,430 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,430 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,431 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,431 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,432 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:31:55,432 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:31:55,434 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:31:55,435 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808376, jitterRate=0.027903422713279724}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:31:55,435 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733175115423Initializing all the Stores at 1733175115424 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175115424Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175115424Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175115424Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175115424Cleaning up temporary data from old regions at 1733175115431 (+7 ms)Region opened successfully at 1733175115435 (+4 ms) 2024-12-02T21:31:55,435 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:31:55,438 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f9a7f3a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:31:55,439 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T21:31:55,439 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:31:55,439 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:31:55,439 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:31:55,439 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T21:31:55,440 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T21:31:55,440 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:31:55,441 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T21:31:55,442 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:31:55,449 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:31:55,449 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:31:55,450 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:31:55,459 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:31:55,460 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:31:55,461 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:31:55,470 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:31:55,471 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:31:55,480 DEBUG 
[master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:31:55,483 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:31:55,491 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:31:55,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:31:55,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:31:55,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:55,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:55,502 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=87c3fdb6c570,44197,1733175115108, sessionid=0x10197f578ff0000, setting cluster-up flag (Was=false) 2024-12-02T21:31:55,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:55,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:55,554 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:31:55,555 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,44197,1733175115108 2024-12-02T21:31:55,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:55,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:55,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:55,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:55,607 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:31:55,608 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=87c3fdb6c570,44197,1733175115108 2024-12-02T21:31:55,610 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T21:31:55,612 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T21:31:55,613 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T21:31:55,613 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-02T21:31:55,613 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 87c3fdb6c570,44197,1733175115108 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:31:55,615 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:31:55,615 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:31:55,615 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:31:55,615 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:31:55,615 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/87c3fdb6c570:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:31:55,615 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,615 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:31:55,615 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,616 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733175145616 2024-12-02T21:31:55,617 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:31:55,617 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:31:55,617 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:31:55,617 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:31:55,617 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:31:55,617 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:31:55,617 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,617 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:31:55,617 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:31:55,618 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:31:55,618 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:31:55,618 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:31:55,618 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:31:55,618 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:31:55,618 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733175115618,5,FailOnTimeoutGroup] 2024-12-02T21:31:55,619 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733175115619,5,FailOnTimeoutGroup] 2024-12-02T21:31:55,619 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,619 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:55,619 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:31:55,619 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,619 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-02T21:31:55,619 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:31:55,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:31:55,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741831_1007 (size=1321) 2024-12-02T21:31:55,628 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T21:31:55,628 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0 2024-12-02T21:31:55,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:31:55,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:31:55,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:55,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:31:55,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:31:55,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:55,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:55,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:31:55,642 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:31:55,642 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:55,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:55,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:31:55,644 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:31:55,645 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:55,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:55,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:31:55,647 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:31:55,647 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:55,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:55,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:31:55,648 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/1588230740 2024-12-02T21:31:55,649 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/1588230740 2024-12-02T21:31:55,651 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:31:55,651 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:31:55,651 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:31:55,653 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:31:55,655 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:31:55,656 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803961, jitterRate=0.02228972315788269}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:31:55,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733175115638Initializing all the Stores at 1733175115638Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175115638Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175115639 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175115639Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175115639Cleaning up temporary data from old regions at 1733175115651 (+12 ms)Region opened successfully at 1733175115657 (+6 ms) 2024-12-02T21:31:55,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:31:55,657 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:31:55,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:31:55,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:31:55,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:31:55,657 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:31:55,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733175115657Disabling compacts and flushes for region at 1733175115657Disabling writes for close at 1733175115657Writing region close event to WAL at 1733175115657Closed at 1733175115657 2024-12-02T21:31:55,659 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:31:55,659 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T21:31:55,659 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:31:55,660 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:31:55,661 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:31:55,709 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(746): ClusterId : 0d7ab248-1d6a-40c4-9148-02afc0479b69 2024-12-02T21:31:55,709 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:31:55,726 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:31:55,726 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:31:55,736 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:31:55,737 DEBUG [RS:0;87c3fdb6c570:37239 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@aed719b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=87c3fdb6c570/172.17.0.3:0 2024-12-02T21:31:55,755 DEBUG [RS:0;87c3fdb6c570:37239 {}] regionserver.ShutdownHook(81): 
Installed shutdown hook thread: Shutdownhook:RS:0;87c3fdb6c570:37239 2024-12-02T21:31:55,755 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T21:31:55,755 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T21:31:55,755 DEBUG [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T21:31:55,755 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(2659): reportForDuty to master=87c3fdb6c570,44197,1733175115108 with port=37239, startcode=1733175115285 2024-12-02T21:31:55,755 DEBUG [RS:0;87c3fdb6c570:37239 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:31:55,757 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48411, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:31:55,758 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44197 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 87c3fdb6c570,37239,1733175115285 2024-12-02T21:31:55,758 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44197 {}] master.ServerManager(517): Registering regionserver=87c3fdb6c570,37239,1733175115285 2024-12-02T21:31:55,759 DEBUG [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0 2024-12-02T21:31:55,759 DEBUG [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35135 2024-12-02T21:31:55,759 DEBUG [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T21:31:55,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:31:55,771 DEBUG [RS:0;87c3fdb6c570:37239 {}] zookeeper.ZKUtil(111): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/87c3fdb6c570,37239,1733175115285 2024-12-02T21:31:55,772 WARN [RS:0;87c3fdb6c570:37239 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
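The ZooKeeper events just above (NodeChildrenChanged on /hbase/rs, the watcher set on /hbase/rs/87c3fdb6c570,37239,1733175115285) reflect the region server registering itself as an ephemeral child under the /hbase/rs znode. As a rough illustration only, and not part of the captured run, a plain ZooKeeper client can list those children; the quorum string 127.0.0.1:52579 and base znode /hbase are taken from the log, while the class name and the 30-second session timeout below are arbitrary.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import java.util.List;
    import java.util.concurrent.CountDownLatch;

    public class ListRegionServerZNodes {
        public static void main(String[] args) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            // Quorum and base znode copied from the log above; substitute your own values.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:52579", 30_000, (WatchedEvent e) -> {
                if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await();
            // Each live region server registers an ephemeral child under /hbase/rs, which is
            // what the NodeChildrenChanged events in the log refer to.
            List<String> servers = zk.getChildren("/hbase/rs", false);
            servers.forEach(System.out::println); // e.g. 87c3fdb6c570,37239,1733175115285
            zk.close();
        }
    }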
2024-12-02T21:31:55,772 INFO [RS:0;87c3fdb6c570:37239 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:31:55,772 DEBUG [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/WALs/87c3fdb6c570,37239,1733175115285 2024-12-02T21:31:55,772 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [87c3fdb6c570,37239,1733175115285] 2024-12-02T21:31:55,776 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:31:55,778 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:31:55,778 INFO [RS:0;87c3fdb6c570:37239 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:31:55,778 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,778 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T21:31:55,779 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T21:31:55,779 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,779 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,779 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,779 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,779 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,780 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,780 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/87c3fdb6c570:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:31:55,780 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,780 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,780 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/87c3fdb6c570:0, corePoolSize=1, 
maxPoolSize=1 2024-12-02T21:31:55,780 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,780 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,780 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/87c3fdb6c570:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:31:55,780 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:31:55,780 DEBUG [RS:0;87c3fdb6c570:37239 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/87c3fdb6c570:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:31:55,780 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,780 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,781 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,781 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,781 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,781 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,37239,1733175115285-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:31:55,796 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:31:55,796 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,37239,1733175115285-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,796 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:55,796 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.Replication(171): 87c3fdb6c570,37239,1733175115285 started 2024-12-02T21:31:55,807 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
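The MemStoreFlusher and PressureAwareCompactionThroughputController entries above are driven by a handful of site-level settings. The sketch below only illustrates which knobs the logged numbers correspond to; the key names (hbase.regionserver.global.memstore.size, hbase.hstore.compaction.throughput.higher.bound / lower.bound) are quoted from memory of the HBase reference guide and should be checked against the version in use, and the class name is arbitrary.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ThroughputTuningSketch {
        public static void main(String[] args) {
            // Normally these live in hbase-site.xml; setting them in code here is only to
            // show the mapping to the values logged above.
            Configuration conf = HBaseConfiguration.create();
            // Fraction of the region server heap shared by all memstores; the logged
            // globalMemStoreLimit=880 M / lowMark=836 M are derived from this fraction.
            conf.setDouble("hbase.regionserver.global.memstore.size", 0.4);
            // Bounds used by PressureAwareCompactionThroughputController, matching the
            // logged "higher bound: 100.00 MB/second, lower bound 50.00 MB/second".
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        }
    }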
2024-12-02T21:31:55,808 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(1482): Serving as 87c3fdb6c570,37239,1733175115285, RpcServer on 87c3fdb6c570/172.17.0.3:37239, sessionid=0x10197f578ff0001 2024-12-02T21:31:55,808 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:31:55,808 DEBUG [RS:0;87c3fdb6c570:37239 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 87c3fdb6c570,37239,1733175115285 2024-12-02T21:31:55,808 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,37239,1733175115285' 2024-12-02T21:31:55,808 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:31:55,808 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:31:55,809 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:31:55,809 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:31:55,809 DEBUG [RS:0;87c3fdb6c570:37239 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 87c3fdb6c570,37239,1733175115285 2024-12-02T21:31:55,809 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '87c3fdb6c570,37239,1733175115285' 2024-12-02T21:31:55,809 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:31:55,809 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:31:55,809 DEBUG [RS:0;87c3fdb6c570:37239 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:31:55,809 INFO [RS:0;87c3fdb6c570:37239 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:31:55,809 INFO [RS:0;87c3fdb6c570:37239 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T21:31:55,812 WARN [87c3fdb6c570:44197 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
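At this point the region server is registered and serving while the master still has one unassigned region to place. The same membership that ServerManager logs ("Registering regionserver=...") is visible from a client through the Admin API; the sketch below assumes an HBase 2/3 client on the classpath with an hbase-site.xml pointing at the cluster, and the class name is arbitrary.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListLiveServers {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // expects hbase-site.xml on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // ClusterMetrics is the client-side view of the registration logged above:
                // one entry per live region server.
                for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
                    System.out.println(sn); // e.g. 87c3fdb6c570,37239,1733175115285
                }
            }
        }
    }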
2024-12-02T21:31:55,913 INFO [RS:0;87c3fdb6c570:37239 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C37239%2C1733175115285, suffix=, logDir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/WALs/87c3fdb6c570,37239,1733175115285, archiveDir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/oldWALs, maxLogs=32 2024-12-02T21:31:55,914 INFO [RS:0;87c3fdb6c570:37239 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C37239%2C1733175115285.1733175115913 2024-12-02T21:31:55,923 INFO [RS:0;87c3fdb6c570:37239 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/WALs/87c3fdb6c570,37239,1733175115285/87c3fdb6c570%2C37239%2C1733175115285.1733175115913 2024-12-02T21:31:55,924 DEBUG [RS:0;87c3fdb6c570:37239 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41839:41839),(127.0.0.1/127.0.0.1:34285:34285)] 2024-12-02T21:31:56,062 DEBUG [87c3fdb6c570:44197 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:31:56,063 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=87c3fdb6c570,37239,1733175115285 2024-12-02T21:31:56,066 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,37239,1733175115285, state=OPENING 2024-12-02T21:31:56,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:31:56,072 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T21:31:56,073 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-02T21:31:56,119 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:31:56,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:56,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:56,130 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:31:56,130 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:31:56,130 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:31:56,130 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,37239,1733175115285}] 2024-12-02T21:31:56,284 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:31:56,289 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36203, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:31:56,294 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T21:31:56,294 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:31:56,296 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=87c3fdb6c570%2C37239%2C1733175115285.meta, suffix=.meta, logDir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/WALs/87c3fdb6c570,37239,1733175115285, archiveDir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/oldWALs, maxLogs=32 2024-12-02T21:31:56,296 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 87c3fdb6c570%2C37239%2C1733175115285.meta.1733175116296.meta 2024-12-02T21:31:56,301 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/WALs/87c3fdb6c570,37239,1733175115285/87c3fdb6c570%2C37239%2C1733175115285.meta.1733175116296.meta 2024-12-02T21:31:56,308 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41839:41839),(127.0.0.1/127.0.0.1:34285:34285)] 2024-12-02T21:31:56,309 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:31:56,309 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:31:56,309 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:31:56,309 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
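Both WAL instances created above report the same configuration (blocksize=256 MB, rollsize=128 MB, maxLogs=32, provider FSHLogProvider). As far as I can tell these map onto the settings sketched below; the key names are recalled from the HBase reference guide and may differ between versions, and the class name is illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // "filesystem" selects the FSHLogProvider named in the log; "asyncfs" is the
            // other common choice.
            conf.set("hbase.wal.provider", "filesystem");
            // WAL block size; the logged 256 MB is consistent with twice a 128 MB HDFS block.
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            // Roll size = block size * multiplier, hence the logged rollsize=128 MB.
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // Upper bound on un-archived WAL files, the logged maxLogs=32.
            conf.setInt("hbase.regionserver.maxlogs", 32);
        }
    }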
2024-12-02T21:31:56,309 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:31:56,309 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:31:56,309 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T21:31:56,309 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T21:31:56,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:31:56,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:31:56,311 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:56,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:56,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T21:31:56,313 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T21:31:56,313 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:56,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:56,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:31:56,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:31:56,314 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:56,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:31:56,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:31:56,315 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:31:56,315 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:31:56,316 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
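The StoreOpener entries above (and their earlier duplicates during the PEWorker-1 open) enumerate the four column families of hbase:meta together with their block sizes, encodings and cache settings. The same descriptor can be read back through the client API; a minimal sketch, assuming a reachable cluster and an HBase 2/3 client, with an arbitrary class name:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class DescribeMeta {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
                // The four families dumped by the StoreOpener above: info, ns, rep_barrier, table.
                for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
                    System.out.printf("%s: blocksize=%d, versions=%d, encoding=%s, inMemory=%s%n",
                        cf.getNameAsString(), cf.getBlocksize(), cf.getMaxVersions(),
                        cf.getDataBlockEncoding(), cf.isInMemory());
                }
            }
        }
    }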
2024-12-02T21:31:56,316 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T21:31:56,316 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/1588230740 2024-12-02T21:31:56,317 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/1588230740 2024-12-02T21:31:56,318 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T21:31:56,318 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T21:31:56,319 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:31:56,320 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T21:31:56,320 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787785, jitterRate=0.0017206966876983643}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:31:56,321 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T21:31:56,321 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733175116309Writing region info on filesystem at 1733175116309Initializing all the Stores at 1733175116310 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175116310Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175116310Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733175116310Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733175116310Cleaning up temporary data from old regions at 1733175116318 (+8 ms)Running coprocessor post-open hooks at 1733175116321 (+3 ms)Region opened successfully at 1733175116321 2024-12-02T21:31:56,322 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733175116283 2024-12-02T21:31:56,324 DEBUG [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:31:56,324 INFO [RS_OPEN_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T21:31:56,324 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=87c3fdb6c570,37239,1733175115285 2024-12-02T21:31:56,325 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 87c3fdb6c570,37239,1733175115285, state=OPEN 2024-12-02T21:31:56,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:31:56,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:31:56,383 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=87c3fdb6c570,37239,1733175115285 2024-12-02T21:31:56,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:31:56,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:31:56,388 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:31:56,388 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=87c3fdb6c570,37239,1733175115285 in 253 msec 2024-12-02T21:31:56,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:31:56,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 729 msec 2024-12-02T21:31:56,393 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:31:56,394 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T21:31:56,395 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:31:56,396 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,37239,1733175115285, seqNum=-1] 2024-12-02T21:31:56,396 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:31:56,398 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55863, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:31:56,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 793 msec 2024-12-02T21:31:56,406 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733175116406, completionTime=-1 2024-12-02T21:31:56,406 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:31:56,406 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T21:31:56,409 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T21:31:56,409 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733175176409 2024-12-02T21:31:56,409 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733175236409 2024-12-02T21:31:56,409 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-02T21:31:56,410 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,44197,1733175115108-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:56,410 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,44197,1733175115108-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:56,410 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,44197,1733175115108-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:56,410 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-87c3fdb6c570:44197, period=300000, unit=MILLISECONDS is enabled. 
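InitMetaProcedure has just created the 'default' and 'hbase' namespaces and the master reports initialization complete. A quick way to confirm that from a client is to list the namespace descriptors; the sketch assumes the standard Admin API and uses an arbitrary class name.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // On a fresh cluster this prints the two namespaces InitMetaProcedure creates:
                // "default" and "hbase".
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println(ns.getName());
                }
            }
        }
    }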
2024-12-02T21:31:56,410 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:56,410 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T21:31:56,412 DEBUG [master/87c3fdb6c570:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T21:31:56,414 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.059sec 2024-12-02T21:31:56,415 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:31:56,415 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:31:56,415 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:31:56,415 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T21:31:56,415 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:31:56,415 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,44197,1733175115108-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:31:56,415 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,44197,1733175115108-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:31:56,417 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:31:56,417 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:31:56,418 INFO [master/87c3fdb6c570:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=87c3fdb6c570,44197,1733175115108-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:31:56,510 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b385e53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:31:56,510 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 87c3fdb6c570,44197,-1 for getting cluster id 2024-12-02T21:31:56,511 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T21:31:56,513 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0d7ab248-1d6a-40c4-9148-02afc0479b69' 2024-12-02T21:31:56,514 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T21:31:56,514 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0d7ab248-1d6a-40c4-9148-02afc0479b69" 2024-12-02T21:31:56,514 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@569b041, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:31:56,514 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [87c3fdb6c570,44197,-1] 2024-12-02T21:31:56,515 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T21:31:56,515 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:56,516 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42346, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T21:31:56,517 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@665a6083, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:31:56,518 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T21:31:56,519 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=87c3fdb6c570,37239,1733175115285, seqNum=-1] 2024-12-02T21:31:56,520 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:31:56,521 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49824, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:31:56,523 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=87c3fdb6c570,44197,1733175115108 2024-12-02T21:31:56,524 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:31:56,527 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T21:31:56,527 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:31:56,530 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/WALs/test.com,8080,1, archiveDir=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/oldWALs, maxLogs=32 2024-12-02T21:31:56,530 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733175116530 2024-12-02T21:31:56,536 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/WALs/test.com,8080,1/test.com%2C8080%2C1.1733175116530 2024-12-02T21:31:56,538 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41839:41839),(127.0.0.1/127.0.0.1:34285:34285)] 2024-12-02T21:31:56,539 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733175116538 2024-12-02T21:31:56,543 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,544 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,544 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,544 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,544 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,544 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/WALs/test.com,8080,1/test.com%2C8080%2C1.1733175116530 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/WALs/test.com,8080,1/test.com%2C8080%2C1.1733175116538 2024-12-02T21:31:56,545 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34285:34285),(127.0.0.1/127.0.0.1:41839:41839)] 2024-12-02T21:31:56,545 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/WALs/test.com,8080,1/test.com%2C8080%2C1.1733175116530 is not closed yet, will try archiving it next time 2024-12-02T21:31:56,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741835_1011 (size=93) 2024-12-02T21:31:56,546 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,546 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,546 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,546 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741835_1011 (size=93) 2024-12-02T21:31:56,546 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,547 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/WALs/test.com,8080,1/test.com%2C8080%2C1.1733175116530 to hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/oldWALs/test.com%2C8080%2C1.1733175116530 2024-12-02T21:31:56,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741836_1012 (size=93) 2024-12-02T21:31:56,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741836_1012 (size=93) 2024-12-02T21:31:56,551 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/oldWALs 2024-12-02T21:31:56,551 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733175116538) 2024-12-02T21:31:56,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T21:31:56,551 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T21:31:56,552 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:31:56,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:56,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:56,552 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T21:31:56,552 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:31:56,552 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=629985956, stopped=false 2024-12-02T21:31:56,552 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=87c3fdb6c570,44197,1733175115108 2024-12-02T21:31:56,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:31:56,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:31:56,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:56,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:56,571 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:31:56,571 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
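The tear-down stack trace shows this whole run being driven by HBaseTestingUtil from AbstractTestLogRolling.tearDown, i.e. a single-process mini cluster (ZooKeeper, mini DFS, one master, one region server) started and stopped around the test. The sketch below is only an approximation of that lifecycle: shutdownMiniCluster appears in the trace, while startMiniCluster and createTable are assumed to exist on HBaseTestingUtil by analogy with the older HBaseTestingUtility API and may differ in signature.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            // Starts ZooKeeper, a mini DFS, one master and one region server, which is the
            // startup sequence captured earlier in this log.
            util.startMiniCluster();
            try {
                Table table = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("cf"));
                // ... exercise the cluster, e.g. roll WALs as the log-rolling test does ...
                table.close();
            } finally {
                // The call visible in the tear-down stack trace above.
                util.shutdownMiniCluster();
            }
        }
    }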
2024-12-02T21:31:56,571 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:31:56,571 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:56,571 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:31:56,571 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '87c3fdb6c570,37239,1733175115285' ***** 2024-12-02T21:31:56,571 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T21:31:56,572 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:31:56,572 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:31:56,572 INFO [RS:0;87c3fdb6c570:37239 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:31:56,572 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T21:31:56,572 INFO [RS:0;87c3fdb6c570:37239 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:31:56,572 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(959): stopping server 87c3fdb6c570,37239,1733175115285 2024-12-02T21:31:56,572 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:31:56,572 INFO [RS:0;87c3fdb6c570:37239 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;87c3fdb6c570:37239. 2024-12-02T21:31:56,572 DEBUG [RS:0;87c3fdb6c570:37239 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T21:31:56,572 DEBUG [RS:0;87c3fdb6c570:37239 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:56,572 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:31:56,572 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:31:56,572 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-02T21:31:56,572 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T21:31:56,572 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T21:31:56,572 DEBUG [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-02T21:31:56,572 DEBUG [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-02T21:31:56,572 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:31:56,573 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T21:31:56,573 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T21:31:56,573 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:31:56,573 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:31:56,573 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-02T21:31:56,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,33183,1733174923193/87c3fdb6c570%2C33183%2C1733174923193.1733174923426 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:31:56,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43877/user/jenkins/test-data/206d1c64-2567-8898-5ca2-22914c5d44db/WALs/87c3fdb6c570,42545,1733174921025/87c3fdb6c570%2C42545%2C1733174921025.meta.1733174923003.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:31:56,585 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/1588230740/.tmp/ns/33b7642ed1ff4cee98e87abf5e216a7a is 43, key is default/ns:d/1733175116398/Put/seqid=0 2024-12-02T21:31:56,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741837_1013 (size=5153) 2024-12-02T21:31:56,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741837_1013 (size=5153) 2024-12-02T21:31:56,590 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/1588230740/.tmp/ns/33b7642ed1ff4cee98e87abf5e216a7a 2024-12-02T21:31:56,595 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/1588230740/.tmp/ns/33b7642ed1ff4cee98e87abf5e216a7a as hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/1588230740/ns/33b7642ed1ff4cee98e87abf5e216a7a 2024-12-02T21:31:56,599 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/1588230740/ns/33b7642ed1ff4cee98e87abf5e216a7a, entries=2, sequenceid=6, filesize=5.0 K 2024-12-02T21:31:56,600 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 27ms, sequenceid=6, compaction requested=false 2024-12-02T21:31:56,600 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T21:31:56,604 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T21:31:56,604 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:31:56,604 INFO [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T21:31:56,605 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733175116572Running coprocessor pre-close hooks at 1733175116572Disabling compacts and flushes for region at 1733175116572Disabling writes for close at 1733175116573 (+1 ms)Obtaining lock to block concurrent updates at 1733175116573Preparing flush snapshotting stores in 1588230740 at 1733175116573Finished memstore 
snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733175116573Flushing stores of hbase:meta,,1.1588230740 at 1733175116573Flushing 1588230740/ns: creating writer at 1733175116574 (+1 ms)Flushing 1588230740/ns: appending metadata at 1733175116585 (+11 ms)Flushing 1588230740/ns: closing flushed file at 1733175116585Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3572455e: reopening flushed file at 1733175116594 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 27ms, sequenceid=6, compaction requested=false at 1733175116600 (+6 ms)Writing region close event to WAL at 1733175116601 (+1 ms)Running coprocessor post-close hooks at 1733175116604 (+3 ms)Closed at 1733175116604 2024-12-02T21:31:56,605 DEBUG [RS_CLOSE_META-regionserver/87c3fdb6c570:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T21:31:56,694 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=5, created chunk count=9, reused chunk count=78, reuseRatio=89.66% 2024-12-02T21:31:56,695 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-02T21:31:56,773 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(976): stopping server 87c3fdb6c570,37239,1733175115285; all regions closed. 2024-12-02T21:31:56,774 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,774 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,774 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,775 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,775 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741834_1010 (size=1152) 2024-12-02T21:31:56,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741834_1010 (size=1152) 2024-12-02T21:31:56,781 INFO [regionserver/87c3fdb6c570:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T21:31:56,781 INFO [regionserver/87c3fdb6c570:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T21:31:56,784 DEBUG [RS:0;87c3fdb6c570:37239 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/oldWALs 2024-12-02T21:31:56,784 INFO [RS:0;87c3fdb6c570:37239 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C37239%2C1733175115285.meta:.meta(num 1733175116296) 2024-12-02T21:31:56,785 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,785 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,785 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,786 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,786 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-12-02T21:31:56,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741833_1009 (size=93) 2024-12-02T21:31:56,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741833_1009 (size=93) 2024-12-02T21:31:56,790 DEBUG [RS:0;87c3fdb6c570:37239 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/oldWALs 2024-12-02T21:31:56,791 INFO [RS:0;87c3fdb6c570:37239 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 87c3fdb6c570%2C37239%2C1733175115285:(num 1733175115913) 2024-12-02T21:31:56,791 DEBUG [RS:0;87c3fdb6c570:37239 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:31:56,791 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:31:56,791 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:31:56,791 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.ChoreService(370): Chore service for: regionserver/87c3fdb6c570:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T21:31:56,791 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:31:56,791 INFO [regionserver/87c3fdb6c570:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T21:31:56,791 INFO [RS:0;87c3fdb6c570:37239 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37239 2024-12-02T21:31:56,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/87c3fdb6c570,37239,1733175115285 2024-12-02T21:31:56,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:31:56,802 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:31:56,803 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [87c3fdb6c570,37239,1733175115285] 2024-12-02T21:31:56,823 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/87c3fdb6c570,37239,1733175115285 already deleted, retry=false 2024-12-02T21:31:56,823 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 87c3fdb6c570,37239,1733175115285 expired; onlineServers=0 2024-12-02T21:31:56,823 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '87c3fdb6c570,44197,1733175115108' ***** 2024-12-02T21:31:56,824 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T21:31:56,824 INFO [M:0;87c3fdb6c570:44197 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T21:31:56,824 INFO [M:0;87c3fdb6c570:44197 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T21:31:56,824 DEBUG [M:0;87c3fdb6c570:44197 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T21:31:56,824 DEBUG 
[M:0;87c3fdb6c570:44197 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T21:31:56,824 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-02T21:31:56,824 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733175115618 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.large.0-1733175115618,5,FailOnTimeoutGroup] 2024-12-02T21:31:56,824 DEBUG [master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733175115619 {}] cleaner.HFileCleaner(306): Exit Thread[master/87c3fdb6c570:0:becomeActiveMaster-HFileCleaner.small.0-1733175115619,5,FailOnTimeoutGroup] 2024-12-02T21:31:56,824 INFO [M:0;87c3fdb6c570:44197 {}] hbase.ChoreService(370): Chore service for: master/87c3fdb6c570:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T21:31:56,824 INFO [M:0;87c3fdb6c570:44197 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T21:31:56,824 DEBUG [M:0;87c3fdb6c570:44197 {}] master.HMaster(1795): Stopping service threads 2024-12-02T21:31:56,824 INFO [M:0;87c3fdb6c570:44197 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T21:31:56,824 INFO [M:0;87c3fdb6c570:44197 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T21:31:56,824 INFO [M:0;87c3fdb6c570:44197 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T21:31:56,824 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T21:31:56,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T21:31:56,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:31:56,834 DEBUG [M:0;87c3fdb6c570:44197 {}] zookeeper.ZKUtil(347): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T21:31:56,834 WARN [M:0;87c3fdb6c570:44197 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T21:31:56,835 INFO [M:0;87c3fdb6c570:44197 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/.lastflushedseqids 2024-12-02T21:31:56,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741838_1014 (size=99) 2024-12-02T21:31:56,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741838_1014 (size=99) 2024-12-02T21:31:56,841 INFO [M:0;87c3fdb6c570:44197 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T21:31:56,842 INFO [M:0;87c3fdb6c570:44197 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, 
isAbort=false 2024-12-02T21:31:56,842 DEBUG [M:0;87c3fdb6c570:44197 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:31:56,842 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:56,842 DEBUG [M:0;87c3fdb6c570:44197 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:56,842 DEBUG [M:0;87c3fdb6c570:44197 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:31:56,842 DEBUG [M:0;87c3fdb6c570:44197 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:31:56,842 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-02T21:31:56,856 DEBUG [M:0;87c3fdb6c570:44197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/37bdde42043140e0bd9430803043ae6a is 82, key is hbase:meta,,1/info:regioninfo/1733175116324/Put/seqid=0 2024-12-02T21:31:56,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741839_1015 (size=5672) 2024-12-02T21:31:56,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741839_1015 (size=5672) 2024-12-02T21:31:56,860 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/37bdde42043140e0bd9430803043ae6a 2024-12-02T21:31:56,877 DEBUG [M:0;87c3fdb6c570:44197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d7776fce9dfc428eb37494dbff897892 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733175116404/Put/seqid=0 2024-12-02T21:31:56,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741840_1016 (size=5275) 2024-12-02T21:31:56,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741840_1016 (size=5275) 2024-12-02T21:31:56,881 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d7776fce9dfc428eb37494dbff897892 2024-12-02T21:31:56,897 DEBUG [M:0;87c3fdb6c570:44197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3783fa54306452f9daaee952c7e0981 is 69, key is 
87c3fdb6c570,37239,1733175115285/rs:state/1733175115758/Put/seqid=0 2024-12-02T21:31:56,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741841_1017 (size=5156) 2024-12-02T21:31:56,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741841_1017 (size=5156) 2024-12-02T21:31:56,902 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3783fa54306452f9daaee952c7e0981 2024-12-02T21:31:56,913 INFO [RS:0;87c3fdb6c570:37239 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:31:56,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:56,913 INFO [RS:0;87c3fdb6c570:37239 {}] regionserver.HRegionServer(1031): Exiting; stopping=87c3fdb6c570,37239,1733175115285; zookeeper connection closed. 2024-12-02T21:31:56,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37239-0x10197f578ff0001, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:56,913 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@562e9035 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@562e9035 2024-12-02T21:31:56,914 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T21:31:56,919 DEBUG [M:0;87c3fdb6c570:44197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/08eb8ff06e5b43679337cd5e70e0a3cc is 52, key is load_balancer_on/state:d/1733175116526/Put/seqid=0 2024-12-02T21:31:56,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741842_1018 (size=5056) 2024-12-02T21:31:56,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741842_1018 (size=5056) 2024-12-02T21:31:56,924 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/08eb8ff06e5b43679337cd5e70e0a3cc 2024-12-02T21:31:56,928 DEBUG [M:0;87c3fdb6c570:44197 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/37bdde42043140e0bd9430803043ae6a as hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/37bdde42043140e0bd9430803043ae6a 2024-12-02T21:31:56,932 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/37bdde42043140e0bd9430803043ae6a, entries=8, sequenceid=29, filesize=5.5 K 2024-12-02T21:31:56,933 DEBUG [M:0;87c3fdb6c570:44197 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d7776fce9dfc428eb37494dbff897892 as hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d7776fce9dfc428eb37494dbff897892 2024-12-02T21:31:56,937 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d7776fce9dfc428eb37494dbff897892, entries=3, sequenceid=29, filesize=5.2 K 2024-12-02T21:31:56,937 DEBUG [M:0;87c3fdb6c570:44197 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3783fa54306452f9daaee952c7e0981 as hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b3783fa54306452f9daaee952c7e0981 2024-12-02T21:31:56,941 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b3783fa54306452f9daaee952c7e0981, entries=1, sequenceid=29, filesize=5.0 K 2024-12-02T21:31:56,942 DEBUG [M:0;87c3fdb6c570:44197 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/08eb8ff06e5b43679337cd5e70e0a3cc as hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/08eb8ff06e5b43679337cd5e70e0a3cc 2024-12-02T21:31:56,947 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35135/user/jenkins/test-data/be2d403d-afa2-0a89-0afc-3710e042ada0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/08eb8ff06e5b43679337cd5e70e0a3cc, entries=1, sequenceid=29, filesize=4.9 K 2024-12-02T21:31:56,947 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 105ms, sequenceid=29, compaction requested=false 2024-12-02T21:31:56,949 INFO [M:0;87c3fdb6c570:44197 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:31:56,949 DEBUG [M:0;87c3fdb6c570:44197 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733175116842Disabling compacts and flushes for region at 1733175116842Disabling writes for close at 1733175116842Obtaining lock to block concurrent updates at 1733175116842Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733175116842Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733175116843 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733175116843Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733175116843Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733175116855 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733175116855Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733175116864 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733175116876 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733175116876Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733175116884 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733175116897 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733175116897Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733175116906 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733175116918 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733175116918Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@633ee9e: reopening flushed file at 1733175116927 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f70d063: reopening flushed file at 1733175116933 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@469f5ba5: reopening flushed file at 1733175116937 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ecc58d9: reopening flushed file at 1733175116942 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 105ms, sequenceid=29, compaction requested=false at 1733175116947 (+5 ms)Writing region close event to WAL at 1733175116949 (+2 ms)Closed at 1733175116949 2024-12-02T21:31:56,949 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,949 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,949 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,949 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,949 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T21:31:56,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45351 is added to blk_1073741830_1006 (size=10311) 2024-12-02T21:31:56,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35999 is added to blk_1073741830_1006 (size=10311) 2024-12-02T21:31:56,952 INFO [M:0;87c3fdb6c570:44197 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-02T21:31:56,952 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T21:31:56,952 INFO [M:0;87c3fdb6c570:44197 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44197 2024-12-02T21:31:56,952 INFO [M:0;87c3fdb6c570:44197 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T21:31:56,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:56,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:31:57,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:57,065 INFO [M:0;87c3fdb6c570:44197 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T21:31:57,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44197-0x10197f578ff0000, quorum=127.0.0.1:52579, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:31:57,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e0b3b7c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:31:57,068 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@120711d3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:31:57,068 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:31:57,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f4abee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:31:57,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55931fd2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/hadoop.log.dir/,STOPPED} 2024-12-02T21:31:57,069 WARN [BP-1837138859-172.17.0.3-1733175112549 heartbeating to localhost/127.0.0.1:35135 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:31:57,069 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:31:57,069 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:31:57,069 WARN [BP-1837138859-172.17.0.3-1733175112549 heartbeating to localhost/127.0.0.1:35135 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1837138859-172.17.0.3-1733175112549 (Datanode Uuid 10c79739-4a7b-442f-bccc-e2c234a2e829) service to localhost/127.0.0.1:35135 2024-12-02T21:31:57,070 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a/data/data3/current/BP-1837138859-172.17.0.3-1733175112549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:57,070 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a/data/data4/current/BP-1837138859-172.17.0.3-1733175112549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:57,070 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:31:57,072 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@56fc288a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:31:57,073 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@541a748b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:31:57,073 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:31:57,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2601a9a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:31:57,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cb9612e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/hadoop.log.dir/,STOPPED} 2024-12-02T21:31:57,074 WARN [BP-1837138859-172.17.0.3-1733175112549 heartbeating to localhost/127.0.0.1:35135 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:31:57,074 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:31:57,074 WARN [BP-1837138859-172.17.0.3-1733175112549 heartbeating to localhost/127.0.0.1:35135 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1837138859-172.17.0.3-1733175112549 (Datanode Uuid d1003b37-d2fe-49a7-95a5-f21e53b246b3) service to localhost/127.0.0.1:35135 2024-12-02T21:31:57,074 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:31:57,075 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a/data/data1/current/BP-1837138859-172.17.0.3-1733175112549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:57,075 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/cluster_5a1f3374-19a0-a2c7-0048-99f448a0b08a/data/data2/current/BP-1837138859-172.17.0.3-1733175112549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:31:57,075 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:31:57,081 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c66b7d1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:31:57,082 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2c0b3275{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:31:57,082 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:31:57,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64e1b9c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:31:57,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31d39f87{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52d52759-fe82-7199-de29-dfb591555f6b/hadoop.log.dir/,STOPPED} 2024-12-02T21:31:57,089 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:31:57,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T21:31:57,114 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 232) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:35135 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35135 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35135
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
 app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:35135
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35135 from jenkins
 java.base@17.0.11/java.lang.Object.wait(Native Method)
 app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
 app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-45-2
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35135
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
 app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35135 from jenkins.hfs.7
 java.base@17.0.11/java.lang.Object.wait(Native Method)
 app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
 app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-45-1
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35135
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
 app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=532 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=48 (was 48), ProcessCount=11 (was 11), AvailableMemoryMB=6583 (was 6589)
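The teardown sequence above (DataNode block pool services ending, Jetty contexts stopping, "Shutdown MiniZK cluster", "Minicluster is down") and the trailing ResourceChecker summary come from HBase's mini-cluster test harness: the checker records thread and file-descriptor counts before the test and compares them with the counts afterwards, flagging "LEAK?" when the after value is higher. As a rough, hypothetical sketch only (not the actual TestLogRolling source; the class name, test name, and the assumption that HBaseTestingUtil exposes startMiniCluster()/shutdownMiniCluster() here are illustrative), a test that drives this lifecycle might look like:

// Hypothetical sketch -- assumes the HBaseTestingUtil start/shutdown API implied
// by the "Minicluster is down" log line; not the real TestLogRolling code.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketch {

  // One shared harness per test class; starting it brings up ZooKeeper,
  // a mini HDFS (NameNode + DataNodes) and a mini HBase cluster.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    // Shutting the mini cluster down is what produces log lines like the ones
    // above: block pool services ending, Jetty handlers stopping,
    // "Shutdown MiniZK cluster", and finally "Minicluster is down".
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testLogRollSketch() throws Exception {
    // Test body omitted; the real test exercises WAL rolling and then relies
    // on the class-level teardown above, after which the ResourceChecker
    // reports any threads or file descriptors still outstanding.
  }
}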